diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index c2868e0f04..0000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,218 +0,0 @@ -version: 2.1 - -executors: - golang: - docker: - - image: tendermintdev/docker-tendermint-build - working_directory: /go/src/github.com/lazyledger/lazyledger-core - environment: - GOBIN: /tmp/bin - release: - machine: true - environment: - AWS_REGION: us-east-1 - -commands: - run_test: - parameters: - script_path: - type: string - steps: - - attach_workspace: - at: /tmp/bin - - restore_cache: - name: "Restore source code cache" - keys: - - go-src-v1-{{ .Revision }} - - checkout - - restore_cache: - name: "Restore go modules cache" - keys: - - go-mod-v1-{{ checksum "go.sum" }} - - run: - name: "Running test" - command: | - bash << parameters.script_path >> -jobs: - setup_dependencies: - executor: golang - steps: - - checkout - - restore_cache: - name: "Restore go modules cache" - keys: - - go-mod-v1-{{ checksum "go.sum" }} - - run: - command: | - mkdir -p /tmp/bin - - run: - name: Cache go modules - command: make go-mod-cache - - run: - name: tools - command: make tools - - run: - name: "Build binaries" - command: make install install_abci - - save_cache: - name: "Save go modules cache" - key: go-mod-v1-{{ checksum "go.sum" }} - paths: - - "/go/pkg/mod" - - save_cache: - name: "Save source code cache" - key: go-src-v1-{{ .Revision }} - paths: - - ".git" - - persist_to_workspace: - root: "/tmp/bin" - paths: - - "." - - prepare_build: - executor: golang - steps: - - restore_cache: - name: "Restore source code cache" - keys: - - go-src-v1-{{ .Revision }} - - checkout - - run: - name: Get next release number - command: | - export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`" - echo "Last tag: ${LAST_TAG}" - if [ -z "${LAST_TAG}" ]; then - export LAST_TAG="${CIRCLE_BRANCH}" - echo "Last tag not found. Possibly fresh branch or feature branch. 
Setting ${LAST_TAG} as tag." - fi - export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`" - echo "Next tag: ${NEXT_TAG}" - echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source - - run: - name: Build dependencies - command: make tools - - persist_to_workspace: - root: . - paths: - - "release-version.source" - - save_cache: - key: v2-release-deps-{{ checksum "go.sum" }} - paths: - - "/go/pkg/mod" - - build_artifacts: - executor: golang - parallelism: 5 - steps: - - restore_cache: - name: "Restore source code cache" - keys: - - go-src-v1-{{ .Revision }} - - checkout - - restore_cache: - name: "Restore release dependencies cache" - keys: - - v2-release-deps-{{ checksum "go.sum" }} - - attach_workspace: - at: /tmp/workspace - - run: - name: Build artifact - command: | - # Setting CIRCLE_TAG because we do not tag the release ourselves. - source /tmp/workspace/release-version.source - if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/lazyledger-core_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi - if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/lazyledger-core_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi - if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/lazyledger-core_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi - if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/lazyledger-core_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi - if test ${CIRCLE_NODE_INDEX:-0} == 4 ;then export GOOS=linux GOARCH=arm64 && export OUTPUT=build/lazyledger-core_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi - - persist_to_workspace: - root: build 
- paths: - - "*.zip" - - "lazyledger-core_linux_amd64" - - release_artifacts: - executor: golang - steps: - - restore_cache: - name: "Restore source code cache" - keys: - - go-src-v1-{{ .Revision }} - - checkout - - attach_workspace: - at: /tmp/workspace - - run: - name: "Deploy to GitHub" - command: | - # Setting CIRCLE_TAG because we do not tag the release ourselves. - source /tmp/workspace/release-version.source - echo "---" - ls -la /tmp/workspace/*.zip - echo "---" - python -u scripts/release_management/sha-files.py - echo "---" - cat /tmp/workspace/SHA256SUMS - echo "---" - export RELEASE_ID="`python -u scripts/release_management/github-draft.py`" - echo "Release ID: ${RELEASE_ID}" - #Todo: Parallelize uploads - export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" - export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" - export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" - export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" - export GOOS=linux GOARCH=arm64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" - python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}" - python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}" - - # # Test RPC implementation against the swagger documented specs - # contract_tests: - # working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint - # machine: - # image: circleci/classic:latest - # environment: - # GOBIN: /home/circleci/.go_workspace/bin - # GOPATH: /home/circleci/.go_workspace/ - # GOOS: linux - # GOARCH: amd64 - # parallelism: 1 - # steps: - # - checkout - # - run: - # name: Test RPC endpoints against swagger documentation - # command: | - # set -x - # export 
PATH=~/.local/bin:$PATH - # # install node and dredd - # ./scripts/get_nodejs.sh - # # build the binaries with a proper version of Go - # docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks - # # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use - # go get github.com/snikch/goodman/cmd/goodman - # make contract-tests - -workflows: - version: 2 - - release: - jobs: - - prepare_build: - filters: - branches: - only: - - /v[0-9]+\.[0-9]+/ - - build_artifacts: - requires: - - prepare_build - filters: - branches: - only: - - /v[0-9]+\.[0-9]+/ - - release_artifacts: - requires: - - prepare_build - - build_artifacts - filters: - branches: - only: - - /v[0-9]+\.[0-9]+/ diff --git a/codecov.yml b/.github/codecov.yml similarity index 83% rename from codecov.yml rename to .github/codecov.yml index 4a20b35d4f..ca879ab642 100644 --- a/codecov.yml +++ b/.github/codecov.yml @@ -2,11 +2,6 @@ coverage: precision: 2 round: down range: "70...100" - notify: - after_n_builds: 4 - github_checks: - annotations: false - status: project: default: @@ -14,6 +9,9 @@ coverage: patch: on changes: off +github_checks: + annotations: false + comment: layout: "diff, files" behavior: default diff --git a/.markdownlint.yml b/.github/linter/markdownlint.yml similarity index 100% rename from .markdownlint.yml rename to .github/linter/markdownlint.yml diff --git a/.mergify.yml b/.github/mergify.yml similarity index 100% rename from .mergify.yml rename to .github/mergify.yml diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ea4e28425b..344ac8a185 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -14,29 +14,37 @@ jobs: - name: Create a file with all the pkgs run: go list ./... 
> pkgs.txt - name: Split pkgs into 4 files - run: split -n l/4 --additional-suffix=.txt ./pkgs.txt + run: split -d -n l/4 pkgs.txt pkgs.txt.part. # cache multiple - uses: actions/upload-artifact@v2 with: - name: "${{ github.sha }}-aa" - path: ./xaa.txt + name: "${{ github.sha }}-00" + path: ./pkgs.txt.part.00 - uses: actions/upload-artifact@v2 with: - name: "${{ github.sha }}-ab" - path: ./xab.txt + name: "${{ github.sha }}-01" + path: ./pkgs.txt.part.01 - uses: actions/upload-artifact@v2 with: - name: "${{ github.sha }}-ac" - path: ./xac.txt + name: "${{ github.sha }}-02" + path: ./pkgs.txt.part.02 - uses: actions/upload-artifact@v2 with: - name: "${{ github.sha }}-ad" - path: ./xad.txt + name: "${{ github.sha }}-03" + path: ./pkgs.txt.part.03 - test-coverage-part-1: + build-linux: + name: Build runs-on: ubuntu-latest - needs: split-test-files + strategy: + fail-fast: false + matrix: + goarch: ["arm", "amd64"] + timeout-minutes: 5 steps: + - uses: actions/setup-go@v2 + with: + go-version: "1.15" - uses: actions/checkout@v2 - uses: technote-space/get-diff-action@v4 with: @@ -44,23 +52,21 @@ jobs: **/**.go go.mod go.sum - - uses: actions/download-artifact@v2 - with: - name: "${{ github.sha }}-aa" - if: env.GIT_DIFF - - name: test & coverage report creation - run: | - cat xaa.txt | xargs go test -mod=readonly -timeout 8m -race -coverprofile=coverage.txt -covermode=atomic - if: env.GIT_DIFF - - uses: codecov/codecov-action@v1.0.14 - with: - file: ./coverage.txt - if: env.GIT_DIFF + - name: install + run: GOOS=linux GOARCH=${{ matrix.goarch }} make build + if: "env.GIT_DIFF != ''" - test-coverage-part-2: + tests: runs-on: ubuntu-latest needs: split-test-files + strategy: + fail-fast: false + matrix: + part: ["00", "01", "02", "03"] steps: + - uses: actions/setup-go@v2 + with: + go-version: "1.15" - uses: actions/checkout@v2 - uses: technote-space/get-diff-action@v4 with: @@ -70,20 +76,24 @@ jobs: go.sum - uses: actions/download-artifact@v2 with: - name: "${{ github.sha 
}}-ab" + name: "${{ github.sha }}-${{ matrix.part }}" if: env.GIT_DIFF + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.15 - name: test & coverage report creation run: | - cat xab.txt | xargs go test -mod=readonly -timeout 5m -race -coverprofile=coverage.txt -covermode=atomic + cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic if: env.GIT_DIFF - - uses: codecov/codecov-action@v1.0.14 + - uses: actions/upload-artifact@v2 with: - file: ./coverage.txt - if: env.GIT_DIFF + name: "${{ github.sha }}-${{ matrix.part }}-coverage" + path: ./${{ matrix.part }}profile.out - test-coverage-part-3: + upload-coverage-report: runs-on: ubuntu-latest - needs: split-test-files + needs: tests steps: - uses: actions/checkout@v2 - uses: technote-space/get-diff-action@v4 @@ -94,37 +104,24 @@ jobs: go.sum - uses: actions/download-artifact@v2 with: - name: "${{ github.sha }}-ac" - if: env.GIT_DIFF - - name: test & coverage report creation - run: | - cat xac.txt | xargs go test -mod=readonly -timeout 10m -race -coverprofile=coverage.txt -covermode=atomic + name: "${{ github.sha }}-00-coverage" if: env.GIT_DIFF - - uses: codecov/codecov-action@v1.0.14 + - uses: actions/download-artifact@v2 with: - file: ./coverage.txt + name: "${{ github.sha }}-01-coverage" if: env.GIT_DIFF - - test-coverage-part-4: - runs-on: ubuntu-latest - needs: split-test-files - steps: - - uses: actions/checkout@v2 - - uses: technote-space/get-diff-action@v4 + - uses: actions/download-artifact@v2 with: - PATTERNS: | - **/**.go - go.mod - go.sum + name: "${{ github.sha }}-02-coverage" + if: env.GIT_DIFF - uses: actions/download-artifact@v2 with: - name: "${{ github.sha }}-ad" + name: "${{ github.sha }}-03-coverage" if: env.GIT_DIFF - - name: test & coverage report creation - run: | - cat xad.txt | xargs go test -mod=readonly -timeout 5m -race -coverprofile=coverage.txt -covermode=atomic + - run: | + 
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt if: env.GIT_DIFF - - uses: codecov/codecov-action@v1.0.14 + - uses: codecov/codecov-action@v1.0.15 with: file: ./coverage.txt if: env.GIT_DIFF diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000000..7d28af7851 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,31 @@ +name: Documentation +# This job builds and deploys documenation to github pages. +# It runs on every push to master. +on: + push: + branches: + - master + +jobs: + build-and-deploy: + runs-on: ubuntu-latest + container: + image: tendermintdev/docker-website-deployment + steps: + - name: Checkout 🛎️ + uses: actions/checkout@v2.3.1 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Install and Build 🔧 + run: | + apk add rsync + make build-docs + + - name: Deploy 🚀 + uses: JamesIves/github-pages-deploy-action@3.7.1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BRANCH: gh-pages + FOLDER: ~/output diff --git a/.github/workflows/e2e-nightly.yml b/.github/workflows/e2e-nightly.yml index 7a060669b0..d32b54840d 100644 --- a/.github/workflows/e2e-nightly.yml +++ b/.github/workflows/e2e-nightly.yml @@ -16,6 +16,10 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: + - uses: actions/setup-go@v2 + with: + go-version: '1.15' + - uses: actions/checkout@v2 - name: Build @@ -32,14 +36,18 @@ jobs: working-directory: test/e2e run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + e2e-nightly-fail: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: - name: Notify Slack on failure - uses: rtCamp/action-slack-notify@e9db0ef - if: ${{ failure() }} + uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0 env: SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} SLACK_CHANNEL: tendermint-internal - SLACK_USERNAME: Nightly E2E Test Failure + SLACK_USERNAME: Nightly E2E Tests SLACK_ICON_EMOJI: ':skull:' SLACK_COLOR: 
danger - SLACK_MESSAGE: Nightly E2E test failed (group ${{ matrix.group }}) + SLACK_MESSAGE: Nightly E2E tests failed SLACK_FOOTER: '' diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index fbc7488bd9..ed26718cad 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -13,6 +13,9 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 15 steps: + - uses: actions/setup-go@v2 + with: + go-version: '1.15' - uses: actions/checkout@v2 - uses: technote-space/get-diff-action@v4 with: diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index f7c5b3e7bb..d8a1e341df 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -7,6 +7,6 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@master - - uses: gaurav-nelson/github-action-markdown-link-check@1.0.8 + - uses: gaurav-nelson/github-action-markdown-link-check@1.0.11 with: folder-path: "docs" diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 8ece757907..c4098557ee 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -27,6 +27,5 @@ jobs: DEFAULT_BRANCH: master GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VALIDATE_MD: true - MARKDOWN_CONFIG_FILE: .markdownlint.yml VALIDATE_OPAENAPI: true VALIDATE_YAML: true diff --git a/.gitignore b/.gitignore index db0e1f77a8..461beae10e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,49 +1,43 @@ -*.swp -*.swo -.bak *.bak +*.iml +*.log +*.swo +*.swp +*/.glide +*/vendor .DS_Store -build/* -artifacts/* -rpc/test/.tendermint -.tendermint -remote_dump +.bak +.idea/ .revision -vendor +.tendermint +.tendermint-lite +.terraform .vagrant -test/e2e/build -test/e2e/networks/*/ -test/p2p/data/ -test/logs +.vendor-new/ +.vscode/ +abci-cli +addrbook.json +artifacts/* +build/* coverage.txt +docs/.vuepress/dist docs/_build docs/dist -docs/.vuepress/dist -*.log -abci-cli docs/node_modules/ index.html.md - -scripts/wal2json/wal2json 
-scripts/cutWALUntil/cutWALUntil - -.idea/ -*.iml - -.vscode/ - libs/pubsub/query/fuzz_test/output +profile\.out +remote_dump +rpc/test/.tendermint +scripts/cutWALUntil/cutWALUntil +scripts/wal2json/wal2json shunit2 - -.tendermint-lite -addrbook.json - -*/vendor -.vendor-new/ -*/.glide -.terraform terraform.tfstate terraform.tfstate.backup terraform.tfstate.d - -profile\.out +test/e2e/build +test/e2e/networks/*/ +test/logs +test/maverick/maverick +test/p2p/data/ +vendor diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 3a42e525f1..0000000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "protoc": { - "options": [ - "--proto_path=${workspaceRoot}/proto", - "--proto_path=${workspaceRoot}/third_party/proto" - ] - } -} diff --git a/CHANGELOG.md b/CHANGELOG.md index 5dffcffbab..e5d4bbe58f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,6 @@ # Changelog +<<<<<<< HEAD ## v0.34.0-rc5 *October 13, 2020* @@ -34,9 +35,13 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi - [privval] \#5441 Fix faulty ping message encoding causing nil message errors in logs (@erikgrinaker) ## v0.34.0-rc4 +======= +## v0.34.0 +>>>>>>> 1547a7e6c12539bfe7d7ba00f9637a455c227b21 -*September 24, 2020* +*November 19, 2020* +<<<<<<< HEAD Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). ### BREAKING CHANGES @@ -130,143 +135,183 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi ## v0.34.0-rc2 *July 30, 2020* +======= +Holy smokes, this is a big one! For a more reader-friendly overview of the changes in 0.34.0 +(and of the changes you need to accommodate as a user), check out [UPGRADING.md](UPGRADING.md). +>>>>>>> 1547a7e6c12539bfe7d7ba00f9637a455c227b21 Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio, -@joe-bowman, @cuonglm +@joe-bowman, @cuonglm, @SadPencil and @dongsam. 
-Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). +And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint). -### BREAKING CHANGES: +### BREAKING CHANGES - CLI/RPC/Config - - [evidence] [\#4959](https://github.com/tendermint/tendermint/issues/4959) Add json tags to `DuplicateVoteEvidence` - - [light] [\#4946](https://github.com/tendermint/tendermint/issues/4946) `tendermint lite` cmd has been renamed to `tendermint light` - - [privval] [\#4582](https://github.com/tendermint/tendermint/issues/4582) `round` in private_validator_state.json is no longer a string in json it is now a number + - [config] [\#5315](https://github.com/tendermint/tendermint/pull/5315) Rename `prof_laddr` to `pprof_laddr` and move it to `rpc` section (@melekes) + - [evidence] [\#4959](https://github.com/tendermint/tendermint/pull/4959) Add JSON tags to `DuplicateVoteEvidence` (@marbar3778) + - [light] [\#4946](https://github.com/tendermint/tendermint/pull/4946) `tendermint lite` command has been renamed to `tendermint light` (@marbar3778) + - [privval] [\#4582](https://github.com/tendermint/tendermint/pull/4582) `round` in private_validator_state.json is no longer JSON string; instead it is a number (@marbar3778) - [rpc] [\#4792](https://github.com/tendermint/tendermint/pull/4792) `/validators` are now sorted by voting power (@melekes) - - [rpc] [\#4937](https://github.com/tendermint/tendermint/issues/4937) Return an error when `page` pagination param is 0 in `/validators`, `tx_search` (@melekes) - - [rpc] [\#5137](https://github.com/tendermint/tendermint/issues/5137) The json tags of `gasWanted` & `gasUsed` in `ResponseCheckTx` & `ResponseDeliverTx` have been made snake_case. 
(`gas_wanted` & `gas_used`) + - [rpc] [\#4947](https://github.com/tendermint/tendermint/pull/4947) Return an error when `page` pagination param is 0 in `/validators`, `tx_search` (@melekes) + - [rpc] [\#5137](https://github.com/tendermint/tendermint/pull/5137) JSON tags of `gasWanted` and `gasUsed` in `ResponseCheckTx` and `ResponseDeliverTx` have been made snake_case (`gas_wanted` and `gas_used`) (@marbar3778) + - [rpc] [\#5315](https://github.com/tendermint/tendermint/pull/5315) Remove `/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and `/unsafe_write_heap_profile`. Please use pprof functionality instead (@melekes) + - [rpc/client, rpc/jsonrpc/client] [\#5347](https://github.com/tendermint/tendermint/pull/5347) All client methods now accept `context.Context` as 1st param (@melekes) - Apps - - [abci] [\#4704](https://github.com/tendermint/tendermint/pull/4704) Add ABCI methods `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` for state sync snapshots. `ABCIVersion` bumped to 0.17.0. - - [abci] [\#4989](https://github.com/tendermint/tendermint/issues/4989) `Proof` within `ResponseQuery` has been renamed to `ProofOps` - - [abci] `CheckTxType` Protobuf enum names are now uppercase, to follow Protobuf style guide + - [abci] [\#4704](https://github.com/tendermint/tendermint/pull/4704) Add ABCI methods `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` for state sync snapshots. `ABCIVersion` bumped to 0.17.0. 
(@erikgrinaker) + - [abci] [\#4989](https://github.com/tendermint/tendermint/pull/4989) `Proof` within `ResponseQuery` has been renamed to `ProofOps` (@marbar3778) + - [abci] [\#5096](https://github.com/tendermint/tendermint/pull/5096) `CheckTxType` Protobuf enum names are now uppercase, to follow Protobuf style guide (@erikgrinaker) + - [abci] [\#5324](https://github.com/tendermint/tendermint/pull/5324) ABCI evidence type is now an enum with two types of possible evidence (@cmwaters) - P2P Protocol - - [blockchain] [\#4637](https://github.com/tendermint/tendermint/issues/4637) Migrate blockchain reactor(s) to Protobuf encoding - - [evidence] [\#4949](https://github.com/tendermint/tendermint/issues/4949) Migrate evidence reactor to Protobuf encoding - - [mempool] [\#4940](https://github.com/tendermint/tendermint/issues/4940) Migrate mempool from to Protobuf encoding - - [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/issues/4973) Migrate `p2p/pex` reactor to Protobuf encoding - - [statesync] [\#4943](https://github.com/tendermint/tendermint/issues/4943) Migrate state sync reactor to Protobuf encoding + - [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778) + - [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778) + - [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778) + - [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes) + - `MaxBatchBytes` new config setting defines the max size of one batch. 
+ - [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778) + - [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778) - Blockchain Protocol - - [evidence] [\#4780](https://github.com/tendermint/tendermint/pull/4780) Cap evidence to an absolute number (@cmwaters) - - Add `max_num` to consensus evidence parameters (default: 50 items). - - [evidence] [\#4725](https://github.com/tendermint/tendermint/issues/4725) Remove `Pubkey` from `DuplicateVoteEvidence` - - [state] [\#4845](https://github.com/tendermint/tendermint/issues/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes) + - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778) + - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters) + - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker) + - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes) - [types] [\#4792](https://github.com/tendermint/tendermint/pull/4792) Sort validators by voting power to enable faster commit verification (@melekes) - On-disk serialization - - [state] [\#4679](https://github.com/tendermint/tendermint/issues/4679) Migrate state module to Protobuf encoding + - [state] [\#4679](https://github.com/tendermint/tendermint/pull/4679) Migrate state module to Protobuf encoding (@marbar3778) - `BlockStoreStateJSON` is now 
`BlockStoreState` and is encoded as binary in the database - - [store] [\#4778](https://github.com/tendermint/tendermint/issues/4778) Migrate store module to Protobuf encoding + - [store] [\#4778](https://github.com/tendermint/tendermint/pull/4778) Migrate store module to Protobuf encoding (@marbar3778) - Light client, private validator - - [light] [\#4964](https://github.com/tendermint/tendermint/issues/4964) Migrate light module migration to Protobuf encoding - - [privval] [\#4985](https://github.com/tendermint/tendermint/issues/4985) Migrate `privval` module to Protobuf encoding + - [light] [\#4964](https://github.com/tendermint/tendermint/pull/4964) Migrate light module migration to Protobuf encoding (@marbar3778) + - [privval] [\#4985](https://github.com/tendermint/tendermint/pull/4985) Migrate `privval` module to Protobuf encoding (@marbar3778) - Go API - - [light] [\#4946](https://github.com/tendermint/tendermint/issues/4946) Rename `lite2` pkg to `light`. Remove `lite` implementation. + - [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) RoundState: `Round`, `LockedRound` & `CommitRound` are now `int32` (@marbar3778) + - [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) HeightVoteSet: `round` is now `int32` (@marbar3778) - [crypto] [\#4721](https://github.com/tendermint/tendermint/pull/4721) Remove `SimpleHashFromMap()` and `SimpleProofsFromMap()` (@erikgrinaker) - - [crypto] [\#4940](https://github.com/tendermint/tendermint/issues/4940) All keys have become `[]byte` instead of `[]byte`. The byte method no longer returns the marshaled value but just the `[]byte` form of the data. - - [crypto] \4988 Removal of key type multisig + - [crypto] [\#4940](https://github.com/tendermint/tendermint/pull/4940) All keys have become `[]byte` instead of `[]byte`. 
The byte method no longer returns the marshaled value but just the `[]byte` form of the data. (@marbar3778) + - [crypto] [\#4988](https://github.com/tendermint/tendermint/pull/4988) Removal of key type multisig (@marbar3778) - The key has been moved to the [Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go) - - [crypto] [\#4989](https://github.com/tendermint/tendermint/issues/4989) Remove `Simple` prefixes from `SimpleProof`, `SimpleValueOp` & `SimpleProofNode`. + - [crypto] [\#4989](https://github.com/tendermint/tendermint/pull/4989) Remove `Simple` prefixes from `SimpleProof`, `SimpleValueOp` & `SimpleProofNode`. (@marbar3778) - `merkle.Proof` has been renamed to `ProofOps`. - Protobuf messages `Proof` & `ProofOp` has been moved to `proto/crypto/merkle` - `SimpleHashFromByteSlices` has been renamed to `HashFromByteSlices` - `SimpleHashFromByteSlicesIterative` has been renamed to `HashFromByteSlicesIterative` - `SimpleProofsFromByteSlices` has been renamed to `ProofsFromByteSlices` - - [crypto] [\#4941](https://github.com/tendermint/tendermint/issues/4941) Remove suffixes from all keys. + - [crypto] [\#4941](https://github.com/tendermint/tendermint/pull/4941) Remove suffixes from all keys. (@marbar3778) - ed25519: type `PrivKeyEd25519` is now `PrivKey` - ed25519: type `PubKeyEd25519` is now `PubKey` - secp256k1: type`PrivKeySecp256k1` is now `PrivKey` - secp256k1: type`PubKeySecp256k1` is now `PubKey` - sr25519: type `PrivKeySr25519` is now `PrivKey` - sr25519: type `PubKeySr25519` is now `PubKey` - - multisig: type `PubKeyMultisigThreshold` is now `PubKey` - - [libs] [\#4831](https://github.com/tendermint/tendermint/issues/4831) Remove `Bech32` pkg from Tendermint. 
This pkg now lives in the [cosmos-sdk](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32) - - [rpc/client] [\#4947](https://github.com/tendermint/tendermint/issues/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes) - - `UnconfirmedTxs` `limit` param is a pointer - - [proto] [\#5025](https://github.com/tendermint/tendermint/issues/5025) All proto files have been moved to `/proto` directory. + - [crypto] [\#5214](https://github.com/tendermint/tendermint/pull/5214) Change `GenPrivKeySecp256k1` to `GenPrivKeyFromSecret` to be consistent with other keys (@marbar3778) + - [crypto] [\#5236](https://github.com/tendermint/tendermint/pull/5236) `VerifyBytes` is now `VerifySignature` on the `crypto.PubKey` interface (@marbar3778) + - [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and change evidence interface (@cmwaters) + - [libs] [\#4831](https://github.com/tendermint/tendermint/pull/4831) Remove `Bech32` pkg from Tendermint. This pkg now lives in the [cosmos-sdk](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32) (@marbar3778) + - [light] [\#4946](https://github.com/tendermint/tendermint/pull/4946) Rename `lite2` pkg to `light`. Remove `lite` implementation. (@marbar3778) + - [light] [\#5347](https://github.com/tendermint/tendermint/pull/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes) + - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker) + - [proto] [\#5025](https://github.com/tendermint/tendermint/pull/5025) All proto files have been moved to `/proto` directory. 
(@marbar3778) - Using the recommended the file layout from buf, [see here for more info](https://buf.build/docs/lint-checkers#file_layout) - - [state] [\#4679](https://github.com/tendermint/tendermint/issues/4679) `TxResult` is a Protobuf type defined in `abci` types directory - - [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) `SignedMsgType` has moved to a Protobuf enum types - - [types] [\#4962](https://github.com/tendermint/tendermint/issues/4962) `ConsensusParams`, `BlockParams`, `EvidenceParams`, `ValidatorParams` & `HashedParams` are now Protobuf types - - [types] [\#4852](https://github.com/tendermint/tendermint/issues/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` - - [types] [\#4798](https://github.com/tendermint/tendermint/issues/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes) - - [types] [\#4845](https://github.com/tendermint/tendermint/issues/4845) Remove `ABCIResult` - - [types] [\#5029](https://github.com/tendermint/tendermint/issues/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency - - [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` - - [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) Vote: `ValidatorIndex` & `Round` are now `int32` - - [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) Proposal: `POLRound` & `Round` are now `int32` - - [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) Block: `Round` is now `int32` - - [consensus] [\#4582](https://github.com/tendermint/tendermint/issues/4582) RoundState: `Round`, `LockedRound` & `CommitRound` are now `int32` - - [consensus] [\#4582](https://github.com/tendermint/tendermint/issues/4582) HeightVoteSet: `round` is 
now `int32` - - [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/issues/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes) + - [rpc/client] [\#4947](https://github.com/tendermint/tendermint/pull/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes) + - `UnconfirmedTxs` `limit` param is a pointer + - [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/pull/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes) + - [state] [\#4679](https://github.com/tendermint/tendermint/pull/4679) `TxResult` is a Protobuf type defined in `abci` types directory (@marbar3778) + - [state] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `State.InitialHeight` field to record initial block height, must be `1` (not `0`) to start from 1 (@erikgrinaker) + - [state] [\#5231](https://github.com/tendermint/tendermint/pull/5231) `LoadStateFromDBOrGenesisFile()` and `LoadStateFromDBOrGenesisDoc()` no longer saves the state in the database if not found, the genesis state is simply returned (@erikgrinaker) + - [state] [\#5348](https://github.com/tendermint/tendermint/pull/5348) Define an Interface for the state store. 
(@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `SignedMsgType` has moved to a Protobuf enum types (@marbar3778) + - [types] [\#4962](https://github.com/tendermint/tendermint/pull/4962) `ConsensusParams`, `BlockParams`, `EvidenceParams`, `ValidatorParams` & `HashedParams` are now Protobuf types (@marbar3778) + - [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778) + - [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes) + - [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes) + - [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Block: `Round` is now `int32` (@marbar3778) -### FEATURES: +### FEATURES -- [abci] [\#5031](https://github.com/tendermint/tendermint/issues/5031) Add `AppVersion` to consensus parameters (@james-ray) - - ... 
making it possible to update your ABCI application version via `EndBlock` response +- [abci] [\#5031](https://github.com/tendermint/tendermint/pull/5031) Add `AppVersion` to consensus parameters (@james-ray) + - This makes it possible to update your ABCI application version via `EndBlock` response +- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters) +- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker) +- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker) +- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. 
See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam) +- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker) - [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes) -- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia evidence can be detected, verified and committed (@cmwaters) -- [light] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Submit conflicting headers, if any, to a full node & all witnesses (@melekes) -- [p2p] [\#4981](https://github.com/tendermint/tendermint/issues/4981) Expose `SaveAs` func on NodeKey (@melekes) +- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters) +- [genesis] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `initial_height` field to specify the initial chain height (defaults to `1`) (@erikgrinaker) +- [libs/math] [\#5665](https://github.com/tendermint/tendermint/pull/5665) Make fractions unsigned integers (uint64) (@cmwaters) +- [light] [\#5298](https://github.com/tendermint/tendermint/pull/5298) Morph validator set and signed header into light block (@cmwaters) +- [p2p] [\#4981](https://github.com/tendermint/tendermint/pull/4981) Expose `SaveAs` func on NodeKey (@melekes) +- [privval] [\#5239](https://github.com/tendermint/tendermint/pull/5239) Add `chainID` to requests from client. 
(@marbar3778) - [rpc] [\#4532](https://github.com/tendermint/tendermint/pull/4923) Support `BlockByHash` query (@fedekunze) -- [rpc] [\#4979](https://github.com/tendermint/tendermint/issues/4979) Support EXISTS operator in `/tx_search` query (@melekes) -- [rpc] [\#5017](https://github.com/tendermint/tendermint/issues/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes) -- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section. +- [rpc] [\#4979](https://github.com/tendermint/tendermint/pull/4979) Support EXISTS operator in `/tx_search` query (@melekes) +- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes) - [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters) +- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section. 
+- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters) -### IMPROVEMENTS: +### IMPROVEMENTS -- [consensus] [\#4578](https://github.com/tendermint/tendermint/issues/4578) Attempt to repair the consensus WAL file (`data/cs.wal/wal`) automatically in case of corruption (@alessio) +- [blockchain] [\#5278](https://github.com/tendermint/tendermint/pull/5278) Verify only +2/3 of the signatures in a block when fast syncing. (@marbar3778) +- [consensus] [\#4578](https://github.com/tendermint/tendermint/pull/4578) Attempt to repair the consensus WAL file (`data/cs.wal/wal`) automatically in case of corruption (@alessio) - The original WAL file will be backed up to `data/cs.wal/wal.CORRUPTED`. -- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Improved evidence db (@cmwaters) +- [consensus] [\#5143](https://github.com/tendermint/tendermint/pull/5143) Only call `privValidator.GetPubKey` once per block (@melekes) +- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters) - [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters) -- [evidence] [\#4892](https://github.com/tendermint/tendermint/pull/4892) Remove redundant header from phantom validator evidence (@cmwaters) +- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters) +- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build 
flag (deadlock) in order to enable deadlock checking (@marbar3778) - [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes) -- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) compare header w/ witnesses only when doing bisection (@melekes) -- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) validate basic for inbound validator sets and headers before further processing them (@cmwaters) -- [p2p/conn] [\#4795](https://github.com/tendermint/tendermint/issues/4795) Return err on `signChallenge()` instead of panic +- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes) +- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters) +- [mempool] Add RemoveTxByKey() exported function for custom mempool cleaning (@p4u) +- [p2p/conn] [\#4795](https://github.com/tendermint/tendermint/pull/4795) Return err on `signChallenge()` instead of panic +- [privval] [\#5437](https://github.com/tendermint/tendermint/pull/5437) `NewSignerDialerEndpoint` can now be given `SignerServiceEndpointOption` (@erikgrinaker) +- [rpc] [\#4968](https://github.com/tendermint/tendermint/pull/4968) JSON encoding is now handled by `libs/json`, not Amino (@erikgrinaker) +- [rpc] [\#5293](https://github.com/tendermint/tendermint/pull/5293) `/dial_peers` has added `private` and `unconditional` as parameters. 
(@marbar3778) - [state] [\#4781](https://github.com/tendermint/tendermint/pull/4781) Export `InitStateVersion` for the initial state version (@erikgrinaker) - [txindex] [\#4466](https://github.com/tendermint/tendermint/pull/4466) Allow to index an event at runtime (@favadi) - `abci.EventAttribute` replaces `KV.Pair` -- [libs] [\#5126](https://github.com/tendermint/tendermint/issues/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking - [types] [\#4905](https://github.com/tendermint/tendermint/pull/4905) Add `ValidateBasic` to validator and validator set (@cmwaters) -- [rpc] [\#4968](https://github.com/tendermint/tendermint/issues/4968) JSON encoding is now handled by `libs/json`, not Amino -- [mempool] Add RemoveTxByKey() exported function for custom mempool cleaning (@p4u) -- [consensus] [\#5143](https://github.com/tendermint/tendermint/issues/5143) Only call `privValidator.GetPubKey` once per block (@melekes) +- [types] [\#5340](https://github.com/tendermint/tendermint/pull/5340) Add check in `Header.ValidateBasic()` for block protocol version (@marbar3778) +- [types] [\#5490](https://github.com/tendermint/tendermint/pull/5490) Use `Commit` and `CommitSig` max sizes instead of vote max size to calculate the maximum block size. 
(@cmwaters) -### BUG FIXES: -- [blockchain/v2] Correctly set block store base in status responses (@erikgrinaker) -- [consensus] [\#4895](https://github.com/tendermint/tendermint/pull/4895) Cache the address of the validator to reduce querying a remote KMS (@joe-bowman) -- [consensus] [\#4970](https://github.com/tendermint/tendermint/issues/4970) Stricter on `LastCommitRound` check (@cuonglm) -- [p2p][\#5136](https://github.com/tendermint/tendermint/pull/5136) Fix error for peer with the same ID but different IPs (@valardragon) -- [proxy] [\#5078](https://github.com/tendermint/tendermint/issues/5078) Fix a bug, where TM does not exit when ABCI app crashes (@melekes) - - -## v0.34.0-rc1 +### BUG FIXES -This release was removed, as a premature GitHub tag was recorded on sum.golang.org causing checksum errors. +- [abci/grpc] [\#5520](https://github.com/tendermint/tendermint/pull/5520) Return async responses in order, to avoid mempool panics. (@erikgrinaker) +- [blockchain/v2] [\#4971](https://github.com/tendermint/tendermint/pull/4971) Correctly set block store base in status responses (@erikgrinaker) +- [blockchain/v2] [\#5499](https://github.com/tendermint/tendermint/pull/5499) Fix "duplicate block enqueued by processor" panic (@melekes) +- [blockchain/v2] [\#5530](https://github.com/tendermint/tendermint/pull/5530) Fix out of order block processing panic (@melekes) +- [blockchain/v2] [\#5553](https://github.com/tendermint/tendermint/pull/5553) Make the removal of an already removed peer a noop (@melekes) +- [consensus] [\#4895](https://github.com/tendermint/tendermint/pull/4895) Cache the address of the validator to reduce querying a remote KMS (@joe-bowman) +- [consensus] [\#4970](https://github.com/tendermint/tendermint/pull/4970) Don't allow `LastCommitRound` to be negative (@cuonglm) +- [consensus] [\#5329](https://github.com/tendermint/tendermint/pull/5329) Fix 
wrong proposer schedule for validators returned by `InitChain` (@erikgrinaker) +- [docker] [\#5385](https://github.com/tendermint/tendermint/pull/5385) Fix incorrect `time_iota_ms` default setting causing block timestamp drift (@erikgrinaker) +- [evidence] [\#5170](https://github.com/tendermint/tendermint/pull/5170) Change ABCI evidence time to the time the infraction happened not the time the evidence was committed on the block (@cmwaters) +- [evidence] [\#5610](https://github.com/tendermint/tendermint/pull/5610) Make it possible for ABCI evidence to be formed from Tendermint evidence (@cmwaters) +- [libs/rand] [\#5215](https://github.com/tendermint/tendermint/pull/5215) Fix out-of-memory error on unexpected argument of Str() (@SadPencil) +- [light] [\#5307](https://github.com/tendermint/tendermint/pull/5307) Persist correct proposer priority in light client validator sets (@cmwaters) +- [p2p] [\#5136](https://github.com/tendermint/tendermint/pull/5136) Fix error for peer with the same ID but different IPs (@valardragon) +- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [proxy] [\#5078](https://github.com/tendermint/tendermint/pull/5078) Force Tendermint to exit when ABCI app crashes (@melekes) +- [rpc] [\#5660](https://github.com/tendermint/tendermint/pull/5660) Set `application/json` as the `Content-Type` header in RPC responses. 
(@alexanderbez) +- [store] [\#5382](https://github.com/tendermint/tendermint/pull/5382) Fix race conditions when loading/saving/pruning blocks (@erikgrinaker) ## v0.33.8 diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 148f782207..a8fa6bb8f4 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -9,12 +9,23 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi ### BREAKING CHANGES - CLI/RPC/Config + - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) + - [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes) + - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) + - [cli] \#5777 use hyphen-case instead of snake_case for all CLI commands and config parameters - Apps + - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface + - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`. - P2P Protocol - Go API + - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes) + - [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker) + - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes) + +- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio) - Blockchain Protocol @@ -22,10 +33,25 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi ### IMPROVEMENTS -- [statesync] \#5516 Check that all heights necessary to rebuild state for a snapshot exist before adding the snapshot to the pool. (@erikgrinaker) - ### BUG FIXES - [types] /#97 Fixes a typo that causes the row roots of the datasquare to be included in the DataAvailabilty header twice. (@evan-forbes) -- [types] /#114 Fixes a typo to map the length of row roots and column roots to correct variables and mitigate confusion. 
(@raneet10) \ No newline at end of file +- [types] /#114 Fixes a typo to map the length of row roots and column roots to correct variables and mitigate confusion. (@raneet10) +- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778) +- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. +- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes) +- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes) +- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778) +- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes) +- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes) +- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes) +- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes) + +### BUG FIXES + +- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778) +- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes) +- [crypto] \#5707 Fix infinite recursion in string formatting of Secp256k1 keys (@erikgrinaker) +- [blockchain/v1] \#5711 Fix deadlock (@melekes) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d24ef3eea8..f6982c40de 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -106,12 +106,12 @@ specify exactly the dependency you want to update, eg. We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core. 
-For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. +For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. There are two ways to generate your proto stubs. 1. Use Docker, pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker` -2. Run `make proto-gen` after installing `protoc` and gogoproto, you can do this by running `make protobuf`. +2. Run `make proto-gen` after installing `buf` and `gogoproto`, you can do this by running `make protobuf`. ### Installation Instructions @@ -127,18 +127,19 @@ make install You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files. -## Vagrant +### Visual Studio Code -If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started -hacking Tendermint with the commands below. +If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`: -NOTE: In case you installed Vagrant in 2017, you might need to run -`vagrant box update` to upgrade to the latest `ubuntu/xenial64`. 
- -```sh -vagrant up -vagrant ssh -make test +```json +{ + "protoc": { + "options": [ + "--proto_path=${workspaceRoot}/proto", + "--proto_path=${workspaceRoot}/third_party/proto" + ] + } +} ``` ## Changelog @@ -246,10 +247,40 @@ Each PR should have one commit once it lands on `master`; this can be accomplish #### Major Release +This major release process assumes that this release was preceded by release candidates. +If there were no release candidates, and you'd like to cut a major release directly from master, see below. + +1. Start on the latest RC branch (`RCx/vX.X.0`). +2. Run integration tests. +3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release: + - "Squash" changes from the changelog entries for the RCs into a single entry, + and add all changes included in `CHANGELOG_PENDING.md`. + (Squashing includes both combining all entries, as well as removing or simplifying + any intra-RC changes. It may also help to alphabetize the entries by package name.) + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all PRs + - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes + or other upgrading flows. + - Bump P2P and block protocol versions in `version.go`, if necessary + - Bump ABCI protocol version in `version.go`, if necessary + - Add any release notes you would like to be added to the body of the release to `release_notes.md`. +4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`). +5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch: + - `git checkout RCx/vX.X.0` + - `git checkout -b release/vX.X.0` +6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`. + - `git tag -a vX.X.0 -m 'Release vX.X.0'` + - `git push origin vX.X.0` +7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. +8. 
Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this + new major release series. + +##### Major Release (from `master`) + 1. Start on `master` 2. Run integration tests (see `test_integrations` in Makefile) 3. Prepare release in a pull request against `master` (to be squash merged): - - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release + - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release had release candidates, squash all the RC updates into one - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues @@ -257,53 +288,51 @@ Each PR should have one commit once it lands on `master`; this can be accomplish release, and add the github aliases of external contributors to the top of the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - Reset the `CHANGELOG_PENDING.md` - - Bump Tendermint version in `version.go` - Bump P2P and block protocol versions in `version.go`, if necessary - Bump ABCI protocol version in `version.go`, if necessary - Make sure all significant breaking changes are covered in `UPGRADING.md` -4. Push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`) -5. Merge back to master (don't squash merge!) + - Add any release notes you would like to be added to the body of the release to `release_notes.md`. +4. Push a tag with prepared release details (this will trigger the release `vX.X.0`) + - `git tag -a vX.X.x -m 'Release vX.X.x'` + - `git push origin vX.X.x` +5. Update the `CHANGELOG.md` file on master with the release's changelog. 6. Delete any RC branches and tags for this release (if applicable) -#### Minor Release +#### Minor Release (Point Releases) -Minor releases are done differently from major releases: They are built off of long-lived release candidate branches, rather than from master. 
+Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master. +Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and +the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case). -1. Checkout the long-lived release candidate branch: `git checkout rcX/vX.X.X` +As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches. + +Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate. + +To create a minor release: + +1. Checkout the long-lived backport branch: `git checkout vX.X.x` 2. Run integration tests: `make test_integrations` -3. Prepare the release: - - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues - - run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - - reset the `CHANGELOG_PENDING.md` - - bump Tendermint version in `version.go` - - bump P2P and block protocol versions in `version.go`, if necessary - - bump ABCI protocol version in `version.go`, if necessary - - make sure all significant breaking changes are covered in `UPGRADING.md` -4. Create a release branch `release/vX.X.x` off the release candidate branch: - - `git checkout -b release/vX.X.x` - - `git push -u origin release/vX.X.x` - - Note that all branches prefixed with `release` are protected once pushed. You will need admin help to make any changes to the branch. -5. Open a pull request of the new minor release branch onto the latest major release branch `vX.X` and then rebase to merge. This will start the release process. +3. 
Check out a new branch and prepare the release: + - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues + - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` + - Reset the `CHANGELOG_PENDING.md` + - Bump the ABCI version number, if necessary. + (Note that ABCI follows semver, and that ABCI versions are the only versions + which can change during minor releases, and only field additions are valid minor changes.) + - Add any release notes you would like to be added to the body of the release to `release_notes.md`. +4. Open a PR with these changes that will land them back on `vX.X.x` +5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. + - `git tag -a vX.X.x -m 'Release vX.X.x'` + - `git push origin vX.X.x` 6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - Remove all `R:minor` labels from the pull requests that were included in the release. - - Do not merge the release branch into master. -7. Delete the former long lived release candidate branch once the release has been made. -8. Create a new release candidate branch to be used for the next release. - -#### Backport Release - -1. Start from the existing release branch you want to backport changes to (e.g. v0.30) - Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7) -2. Cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed) -3. Follow steps 2 and 3 from [Major Release](#major-release) -4. Push changes to release/vX.X.X branch -5. Open a PR against the existing vX.X branch + - Do not merge the backport branch into master. 
-#### Release Candidates +#### Release Candidates -Before creating an official release, especially a major release, we may want to create a -release candidate (RC) for our friends and partners to test out. We use git tags to +Before creating an official release, especially a major release, we may want to create a +release candidate (RC) for our friends and partners to test out. We use git tags to create RCs, and we build them off of RC branches. RC branches typically have names formatted like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`). diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 67aa3663ff..a4ce89e479 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -40,7 +40,7 @@ ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain COPY ./docker-entrypoint.sh /usr/local/bin/ ENTRYPOINT ["docker-entrypoint.sh"] -CMD ["node"] +CMD ["start"] # Expose the data directory as a volume since there's mutable state in there VOLUME [ "$TMHOME" ] diff --git a/DOCKER/docker-entrypoint.sh b/DOCKER/docker-entrypoint.sh index 0cb45c58a9..d74511c17e 100755 --- a/DOCKER/docker-entrypoint.sh +++ b/DOCKER/docker-entrypoint.sh @@ -6,11 +6,11 @@ if [ ! 
-d "$TMHOME/config" ]; then tendermint init sed -i \ - -e "s/^proxy_app\s*=.*/proxy_app = \"$PROXY_APP\"/" \ + -e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \ -e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \ - -e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \ - -e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \ - -e 's/^index_all_tags\s*=.*/index_all_tags = true/' \ + -e 's/^addr-book-strict\s*=.*/addr-book-strict = false/' \ + -e 's/^timeout-commit\s*=.*/timeout-commit = "500ms"/' \ + -e 's/^index-all-tags\s*=.*/index-all-tags = true/' \ -e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \ -e 's/^prometheus\s*=.*/prometheus = true/' \ "$TMHOME/config/config.toml" diff --git a/Makefile b/Makefile index 590c73610c..a078c87244 100644 --- a/Makefile +++ b/Makefile @@ -4,9 +4,17 @@ PACKAGES=$(shell go list ./...) BUILDDIR ?= $(CURDIR)/build BUILD_TAGS?=tendermint -LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` + +# If building a release, please checkout the version tag to get the correct version setting +ifneq ($(shell git symbolic-ref -q --short HEAD),) +VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD) +else +VERSION := $(shell git describe) +endif + +LD_FLAGS = -X github.com/lazyledger/lazyledger-core/version.TMCoreSemVer=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" -HTTPS_GIT := https://github.com/tendermint/tendermint.git +HTTPS_GIT := https://github.com/lazyledger/lazyledger-core.git DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf CGO_ENABLED ?= 0 @@ -51,8 +59,8 @@ all: check build test install .PHONY: all # The below include contains the tools. 
-include tools.mk -include tests.mk +include tools/Makefile +include test/Makefile ############################################################################### ### Build Tendermint ### @@ -191,7 +199,7 @@ DESTINATION = ./index.html.md ############################################################################### ### Documentation ### ############################################################################### - +# todo remove once tendermint.com DNS is solved build-docs: @cd docs && \ while read -r branch path_prefix; do \ @@ -202,14 +210,6 @@ build-docs: done < versions ; .PHONY: build-docs -sync-docs: - cd ~/output && \ - echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \ - echo "CI job = ${CIRCLE_BUILD_URL}" >> version.html ; \ - aws s3 sync . s3://${WEBSITE_BUCKET} --profile terraform --delete ; \ - aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ; -.PHONY: sync-docs - ############################################################################### ### Docker image ### ############################################################################### diff --git a/PHILOSOPHY.md b/PHILOSOPHY.md deleted file mode 100644 index a0ee61c35c..0000000000 --- a/PHILOSOPHY.md +++ /dev/null @@ -1,158 +0,0 @@ -# Design goals - -The design goals for Tendermint (and the SDK and related libraries) are: - -* Simplicity and Legibility -* Parallel performance, namely ability to utilize multicore architecture -* Ability to evolve the codebase bug-free -* Debuggability -* Complete correctness that considers all edge cases, esp in concurrency -* Future-proof modular architecture, message protocol, APIs, and encapsulation - - -## Justification - -Legibility is key to maintaining bug-free software as it evolves toward more -optimizations, more ease of debugging, and additional features. 
- -It is too easy to introduce bugs over time by replacing lines of code with -those that may panic, which means ideally locks are unlocked by defer -statements. - -For example, - -```go -func (obj *MyObj) something() { - mtx.Lock() - obj.something = other - mtx.Unlock() -} -``` - -It is too easy to refactor the codebase in the future to replace `other` with -`other.String()` for example, and this may introduce a bug that causes a -deadlock. So as much as reasonably possible, we need to be using defer -statements, even though it introduces additional overhead. - -If it is necessary to optimize the unlocking of mutex locks, the solution is -more modularity via smaller functions, so that defer'd unlocks are scoped -within a smaller function. - -Similarly, idiomatic for-loops should always be preferred over those that use -custom counters, because it is too easy to evolve the body of a for-loop to -become more complicated over time, and it becomes more and more difficult to -assess the correctness of such a for-loop by visual inspection. - - -## On performance - -It doesn't matter whether there are alternative implementations that are 2x or -3x more performant, when the software doesn't work, deadlocks, or if bugs -cannot be debugged. By taking advantage of multicore concurrency, the -Tendermint implementation will at least be an order of magnitude within the -range of what is theoretically possible. The design philosophy of Tendermint, -and the choice of Go as implementation language, is designed to make Tendermint -implementation the standard specification for concurrent BFT software. - -By focusing on the message protocols (e.g. ABCI, p2p messages), and -encapsulation e.g. IAVL module, (relatively) independent reactors, we are both -implementing a standard implementation to be used as the specification for -future implementations in more optimizable languages like Rust, Java, and C++; -as well as creating sufficiently performant software. 
Tendermint Core will -never be as fast as future implementations of the Tendermint Spec, because Go -isn't designed to be as fast as possible. The advantage of using Go is that we -can develop the whole stack of modular components **faster** than in other -languages. - -Furthermore, the real bottleneck is in the application layer, and it isn't -necessary to support more than a sufficiently decentralized set of validators -(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS). - -Instead of optimizing Tendermint performance down to the metal, lets focus on -optimizing on other matters, namely ability to push feature complete software -that works well enough, can be debugged and maintained, and can serve as a spec -for future implementations. - - -## On encapsulation - -In order to create maintainable, forward-optimizable software, it is critical -to develop well-encapsulated objects that have well understood properties, and -to re-use these easy-to-use-correctly components as building blocks for further -encapsulated meta-objects. - -For example, mutexes are cheap enough for Tendermint's design goals when there -isn't goroutine contention, so it is encouraged to create concurrency safe -structures with struct-level mutexes. If they are used in the context of -non-concurrent logic, then the performance is good enough. If they are used in -the context of concurrent logic, then it will still perform correctly. - -Examples of this design principle can be seen in the types.ValidatorSet struct, -and the rand.Rand struct. It's one single struct declaration that can be used -in both concurrent and non-concurrent logic, and due to its well encapsulation, -it's easy to get the usage of the mutex right. - -### example: rand.Rand - -`The default Source is safe for concurrent use by multiple goroutines, but -Sources created by NewSource are not`. 
The reason why the default -package-level source is safe for concurrent use is because it is protected (see -`lockedSource` in ). - -But we shouldn't rely on the global source, we should be creating our own -Rand/Source instances and using them, especially for determinism in testing. -So it is reasonable to have rand.Rand be protected by a mutex. Whether we want -our own implementation of Rand is another question, but the answer there is -also in the affirmative. Sometimes you want to know where Rand is being used -in your code, so it becomes a simple matter of dropping in a log statement to -inject inspectability into Rand usage. Also, it is nice to be able to extend -the functionality of Rand with custom methods. For these reasons, and for the -reasons which is outlined in this design philosophy document, we should -continue to use the rand.Rand object, with mutex protection. - -Another key aspect of good encapsulation is the choice of exposed vs unexposed -methods. It should be clear to the reader of the code, which methods are -intended to be used in what context, and what safe usage is. Part of this is -solved by hiding methods via unexported methods. Another part of this is -naming conventions on the methods (e.g. underscores) with good documentation, -and code organization. If there are too many exposed methods and it isn't -clear what methods have what side effects, then there is something wrong about -the design of abstractions that should be revisited. - - -## On concurrency - -In order for Tendermint to remain relevant in the years to come, it is vital -for Tendermint to take advantage of multicore architectures. Due to the nature -of the problem, namely consensus across a concurrent p2p gossip network, and to -handle RPC requests for a large number of consuming subscribers, it is -unavoidable for Tendermint development to require expertise in concurrency -design, especially when it comes to the reactor design, and also for RPC -request handling. 
- - -# Guidelines - -Here are some guidelines for designing for (sufficient) performance and concurrency: - -* Mutex locks are cheap enough when there isn't contention. -* Do not optimize code without analytical or observed proof that it is in a hot path. -* Don't over-use channels when mutex locks w/ encapsulation are sufficient. -* The need to drain channels are often a hint of unconsidered edge cases. -* The creation of O(N) one-off goroutines is generally technical debt that - needs to get addressed sooner than later. Avoid creating too many -goroutines as a patch around incomplete concurrency design, or at least be -aware of the debt and do not invest in the debt. On the other hand, Tendermint -is designed to have a limited number of peers (e.g. 10 or 20), so the creation -of O(C) goroutines per O(P) peers is still O(C\*P=constant). -* Use defer statements to unlock as much as possible. If you want to unlock sooner, - try to create more modular functions that do make use of defer statements. - -# Mantras - -* Premature optimization kills -* Readability is paramount -* Beautiful is better than fast. -* In the face of ambiguity, refuse the temptation to guess. -* In the face of bugs, refuse the temptation to cover the bug. -* There should be one-- and preferably only one --obvious way to do it. diff --git a/README.md b/README.md index d6ea7d0a72..9f6468a5a4 100644 --- a/README.md +++ b/README.md @@ -28,8 +28,8 @@ a more detailed overview what to expect from this repository. ### Minimum requirements | Requirement | Notes | -| ----------- | ---------------- | -| Go version | Go1.14 or higher | +|-------------|------------------| +| Go version | Go1.15 or higher | ### Install diff --git a/UPGRADING.md b/UPGRADING.md index 7152a9400f..a6ce485d27 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -2,14 +2,46 @@ This guide provides instructions for upgrading to specific versions of Tendermint Core. 
+## Unreleased + +### ABCI Changes + +* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid unimplemented changes errors. + +* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in the early ABCI implementations. + +### Config Changes + +* `fast_sync = "v1"` is no longer supported. Please use `v2` instead. + +* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure + you have updated all the variables in your `config.toml` file. + +### CLI Changes + +* If you had previously used `tendermint gen_node_key` to generate a new node + key, keep in mind that it no longer saves the output to a file. You can use + `tendermint init` or pipe the output of `tendermint gen_node_key` to + `$TMHOME/config/node_key.json`: + + ``` + $ tendermint gen_node_key > $TMHOME/config/node_key.json + ``` + +* CLI commands and flags are all now hyphen-case instead of snake_case. + Make sure to adjust any scripts that call a CLI command with snake_case naming. ## v0.34.0 **Upgrading to Tendermint 0.34 requires a blockchain restart.** This release is not compatible with previous blockchains due to changes to the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol"). +Note that Tendermint 0.34 also requires Go 1.15 or higher. + ### ABCI Changes +* The `ABCIVersion` is now `0.17.0`. + * New ABCI methods (`ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk`) were added to support the new State Sync feature. Previously, syncing a new node to a preexisting network could take days; but with State Sync, @@ -61,7 +93,7 @@ fields. Tendermint now relies on the application to tell it which transactions to index. This means that in the `config.toml`, generated by Tendermint, there is no longer a way to specify which -transactions to index.
`tx.height` & `tx.hash` will always be indexed when using the `kv` indexer. +transactions to index. `tx.height` and `tx.hash` will always be indexed when using the `kv` indexer. Applications must now choose to either a) enable indexing for all transactions, or b) allow node operators to decide which transactions to index. @@ -107,7 +139,7 @@ Tendermint 0.34 includes new and updated consensus parameters. #### Evidence Parameters -* `MaxNum`, which caps the total amount of evidence by a absolute number. The default is 50. +* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB). ### Crypto @@ -168,16 +200,31 @@ Other user-relevant changes include: ### `privval` Package All requests are now accompanied by the chain ID from the network. -This is a optional field and can be ignored by key management systems. -It is recommended to check the chain ID if using the same key management system for multiple chains. +This is an optional field and can be ignored by key management systems; +however, if you are using the same key management system for multiple different +blockchains, we recommend that you check the chain ID. + ### RPC -`/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and -`/unsafe_write_heap_profile` were removed. -For profiling, please use the pprof server, which can -be enabled through `--rpc.pprof_laddr=X` flag or `pprof_laddr=X` config setting -in the rpc section. +* `/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and + `/unsafe_write_heap_profile` were removed. + For profiling, please use the pprof server, which can + be enabled through `--rpc.pprof_laddr=X` flag or `pprof_laddr=X` config setting + in the rpc section. +* The `Content-Type` header returned on RPC calls is now (correctly) set as `application/json`. + +### Version + +Version is now set through Go linker flags `ld_flags`. Applications that are using tendermint as a library should set this at compile time.
+ +Example: + +```sh +go install -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(go list -m github.com/tendermint/tendermint | sed 's/ /\@/g') -s -w " -trimpath ./cmd +``` + +Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`. ## v0.33.4 diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 00fd2b310e..0000000000 --- a/Vagrantfile +++ /dev/null @@ -1,66 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -Vagrant.configure("2") do |config| - config.vm.box = "ubuntu/focal64" - - config.vm.provider "virtualbox" do |v| - v.memory = 4096 - v.cpus = 2 - end - - config.vm.provision "shell", inline: <<-SHELL - apt-get update - - # install base requirements - apt-get install -y --no-install-recommends wget curl jq zip \ - make shellcheck bsdmainutils psmisc - apt-get install -y language-pack-en - - # install docker - apt-get install -y --no-install-recommends apt-transport-https \ - ca-certificates \ - curl \ - gnupg-agent \ - software-properties-common - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" - apt-get update - apt-get install -y docker-ce - usermod -aG docker vagrant - - # install go - wget -q https://dl.google.com/go/go1.14.linux-amd64.tar.gz - tar -xvf go1.14.linux-amd64.tar.gz - mv go /usr/local - rm -f go1.14.linux-amd64.tar.gz - - # install nodejs (for docs) - curl -sL https://deb.nodesource.com/setup_11.x | bash - - apt-get install -y nodejs - - # cleanup - apt-get autoremove -y - - # set env variables - echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile - echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile - echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile - echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile - echo 'cd 
go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile - - mkdir -p /home/vagrant/go/bin - mkdir -p /home/vagrant/go/src/github.com/tendermint - ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint - - chown -R vagrant:vagrant /home/vagrant/go - chown vagrant:vagrant /home/vagrant/.bash_profile - - # get all deps and tools, ready to install/test - su - vagrant -c 'source /home/vagrant/.bash_profile' - su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools' - SHELL -end diff --git a/abci/client/client.go b/abci/client/client.go index 4f63d97607..b53bac688c 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -1,6 +1,7 @@ package abcicli import ( + "context" "fmt" "sync" @@ -17,48 +18,52 @@ const ( //go:generate mockery --case underscore --name Client // Client defines an interface for an ABCI client. -// All `Async` methods return a `ReqRes` object. +// +// All `Async` methods return a `ReqRes` object and an error. // All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error. -// Note these are client errors, eg. ABCI socket connectivity issues. -// Application-related errors are reflected in response via ABCI error codes and logs. -//go:generate mockery --case underscore --name Client +// +// NOTE these are client errors, eg. ABCI socket connectivity issues. +// Application-related errors are reflected in response via ABCI error codes +// and logs. 
type Client interface { service.Service SetResponseCallback(Callback) Error() error - FlushAsync() *ReqRes - EchoAsync(msg string) *ReqRes - InfoAsync(types.RequestInfo) *ReqRes - DeliverTxAsync(types.RequestDeliverTx) *ReqRes - CheckTxAsync(types.RequestCheckTx) *ReqRes - QueryAsync(types.RequestQuery) *ReqRes - CommitAsync() *ReqRes - InitChainAsync(types.RequestInitChain) *ReqRes - BeginBlockAsync(types.RequestBeginBlock) *ReqRes - EndBlockAsync(types.RequestEndBlock) *ReqRes - ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes - OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes - LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes - ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes - PreprocessTxsAsync(types.RequestPreprocessTxs) *ReqRes - - FlushSync() error - EchoSync(msg string) (*types.ResponseEcho, error) - InfoSync(types.RequestInfo) (*types.ResponseInfo, error) - DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error) - CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error) - QuerySync(types.RequestQuery) (*types.ResponseQuery, error) - CommitSync() (*types.ResponseCommit, error) - InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) - BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) - ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) - PreprocessTxsSync(types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) + // Asynchronous requests + FlushAsync(context.Context) (*ReqRes, error) + EchoAsync(ctx context.Context, msg string) (*ReqRes, error) + 
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error) + DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error) + CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error) + QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error) + CommitAsync(context.Context) (*ReqRes, error) + InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error) + BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error) + EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error) + ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error) + OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error) + LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error) + ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error) + PreprocessTxsAsync(context.Context, types.RequestPreprocessTxs) (*ReqRes, error) + + // Synchronous requests + FlushSync(context.Context) error + EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) + InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) + DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error) + CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) + QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error) + CommitSync(context.Context) (*types.ResponseCommit, error) + InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) + BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) + EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) + ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + 
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) + PreprocessTxsSync(context.Context, types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) } //---------------------------------------- diff --git a/abci/client/doc.go b/abci/client/doc.go new file mode 100644 index 0000000000..eac40fe118 --- /dev/null +++ b/abci/client/doc.go @@ -0,0 +1,29 @@ +// Package abcicli provides an ABCI implementation in Go. +// +// There are 3 clients available: +// 1. socket (unix or TCP) +// 2. local (in memory) +// 3. gRPC +// +// ## Socket client +// +// async: the client maintains an internal buffer of a fixed size. when the +// buffer becomes full, all Async calls will return an error immediately. +// +// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the +// Flush requests 3) waiting for the Flush response +// +// ## Local client +// +// async: global mutex is locked during each call (meaning it's not really async!) +// sync: global mutex is locked during each call +// +// ## gRPC client +// +// async: gRPC is synchronous, but an internal buffer of a fixed size is used +// to store responses and later call callbacks (separate goroutine per +// response). +// +// sync: waits for all Async calls to complete (essentially what Flush does in +// the socket client) and calls Sync method. 
+package abcicli diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 1db181ad74..9f473206c5 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -1,11 +1,12 @@ package abcicli import ( + "context" "fmt" "net" + "sync" "time" - "golang.org/x/net/context" "google.golang.org/grpc" "github.com/lazyledger/lazyledger-core/abci/types" @@ -14,16 +15,14 @@ import ( tmsync "github.com/lazyledger/lazyledger-core/libs/sync" ) -var _ Client = (*grpcClient)(nil) - -// A stripped copy of the remoteClient that makes -// synchronous calls using grpc +// A gRPC client. type grpcClient struct { service.BaseService mustConnect bool - client types.ABCIApplicationClient - conn *grpc.ClientConn + client types.ABCIApplicationClient + conn *grpc.ClientConn + chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool mtx tmsync.Mutex addr string @@ -31,10 +30,29 @@ type grpcClient struct { resCb func(*types.Request, *types.Response) // listens to all callbacks } +var _ Client = (*grpcClient)(nil) + +// NewGRPCClient creates a gRPC client, which will connect to addr upon the +// start. Note Client#Start returns an error if connection is unsuccessful and +// mustConnect is true. +// +// GRPC calls are synchronous, but some callbacks expect to be called +// asynchronously (eg. the mempool expects to be able to lock to remove bad txs +// from cache). To accommodate, we finish each call in its own go-routine, +// which is expensive, but easy - if you want something better, use the socket +// protocol! maybe one day, if people really want it, we use grpc streams, but +// hopefully not :D func NewGRPCClient(addr string, mustConnect bool) Client { cli := &grpcClient{ addr: addr, mustConnect: mustConnect, + // Buffering the channel is needed to make calls appear asynchronous, + // which is required when the caller makes multiple async calls before + // processing callbacks (e.g. due to holding locks). 
64 means that a + // caller can make up to 64 async calls before a callback must be + // processed (otherwise it deadlocks). It also means that we can make 64 + // gRPC calls while processing a slow callback at the channel head. + chReqRes: make(chan *ReqRes, 64), } cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli) return cli @@ -45,9 +63,36 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { } func (cli *grpcClient) OnStart() error { - if err := cli.BaseService.OnStart(); err != nil { - return err - } + // This processes asynchronous request/response messages and dispatches + // them to callbacks. + go func() { + // Use a separate function to use defer for mutex unlocks (this handles panics) + callCb := func(reqres *ReqRes) { + cli.mtx.Lock() + defer cli.mtx.Unlock() + + reqres.SetDone() + reqres.Done() + + // Notify client listener if set + if cli.resCb != nil { + cli.resCb(reqres.Request, reqres.Response) + } + + // Notify reqRes listener if set + if cb := reqres.GetCallback(); cb != nil { + cb(reqres.Response) + } + } + for reqres := range cli.chReqRes { + if reqres != nil { + callCb(reqres) + } else { + cli.Logger.Error("Received nil reqres") + } + } + }() + RETRY_LOOP: for { conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) @@ -80,11 +125,10 @@ RETRY_LOOP: } func (cli *grpcClient) OnStop() { - cli.BaseService.OnStop() - if cli.conn != nil { cli.conn.Close() } + close(cli.chReqRes) } func (cli *grpcClient) StopForError(err error) { @@ -119,249 +163,372 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) { } //---------------------------------------- -// GRPC calls are synchronous, but some callbacks expect to be called asynchronously -// (eg. the mempool expects to be able to lock to remove bad txs from cache). -// To accommodate, we finish each call in its own go-routine, -// which is expensive, but easy - if you want something better, use the socket protocol! 
-// maybe one day, if people really want it, we use grpc streams, -// but hopefully not :D - -func (cli *grpcClient) EchoAsync(msg string) *ReqRes { + +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { req := types.ToRequestEcho(msg) - res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true)) + res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}}) } -func (cli *grpcClient) FlushAsync() *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) { req := types.ToRequestFlush() - res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true)) + res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}}) } -func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) { req := types.ToRequestInfo(params) - res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true)) + res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}}) + return 
cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}}) } -func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) { req := types.ToRequestDeliverTx(params) - res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true)) + res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}}) } -func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) { req := types.ToRequestCheckTx(params) - res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true)) + res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}) } -func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) { req := types.ToRequestQuery(params) - res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true)) + res, err := cli.client.Query(ctx, req.GetQuery(), 
grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}}) } -func (cli *grpcClient) CommitAsync() *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) { req := types.ToRequestCommit() - res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true)) + res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}}) } -func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) { req := types.ToRequestInitChain(params) - res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true)) + res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}}) } -func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) { req := types.ToRequestBeginBlock(params) - res, err := 
cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true)) + res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}}) } -func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) { req := types.ToRequestEndBlock(params) - res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true)) + res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}}) } -func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) { req := types.ToRequestListSnapshots(params) - res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true)) + res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}}) } -func (cli 
*grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) { req := types.ToRequestOfferSnapshot(params) - res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true)) + res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}}) } -func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) LoadSnapshotChunkAsync( + ctx context.Context, + params types.RequestLoadSnapshotChunk, +) (*ReqRes, error) { req := types.ToRequestLoadSnapshotChunk(params) - res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true)) + res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}}) + return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}}) } -func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes { +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) ApplySnapshotChunkAsync( + ctx context.Context, + params types.RequestApplySnapshotChunk, +) (*ReqRes, error) { req := types.ToRequestApplySnapshotChunk(params) 
- res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true)) + res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}}) -} - -func (cli *grpcClient) PreprocessTxsAsync(params types.RequestPreprocessTxs) *ReqRes { + return cli.finishAsyncCall( + ctx, + req, + &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}}, + ) +} + +// NOTE: call is synchronous, use ctx to break early if needed +func (cli *grpcClient) PreprocessTxsAsync( + ctx context.Context, + params types.RequestPreprocessTxs, +) (*ReqRes, error) { req := types.ToRequestPreprocessTxs(params) - res, err := cli.client.PreprocessTxs(context.Background(), req.GetPreprocessTxs(), grpc.WaitForReady(true)) + res, err := cli.client.PreprocessTxs(ctx, req.GetPreprocessTxs(), grpc.WaitForReady(true)) if err != nil { - cli.StopForError(err) + return nil, err } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_PreprocessTxs{PreprocessTxs: res}}) + return cli.finishAsyncCall( + ctx, + req, + &types.Response{Value: &types.Response_PreprocessTxs{PreprocessTxs: res}}, + ) } -func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes { +// finishAsyncCall creates a ReqRes for an async call, and immediately populates it +// with the response. We don't complete it until it's been ordered via the channel. 
+func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) { reqres := NewReqRes(req) - reqres.Response = res // Set response - reqres.Done() // Release waiters - reqres.SetDone() // so reqRes.SetCallback will run the callback - - // goroutine for callbacks - go func() { - cli.mtx.Lock() - defer cli.mtx.Unlock() - - // Notify client listener if set - if cli.resCb != nil { - cli.resCb(reqres.Request, res) - } - - // Notify reqRes listener if set - if cb := reqres.GetCallback(); cb != nil { - cb(res) - } - }() + reqres.Response = res + select { + case cli.chReqRes <- reqres: // use channel for async responses, since they must be ordered + return reqres, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} - return reqres +// finishSyncCall waits for an async call to complete. It is necessary to call all +// sync calls asynchronously as well, to maintain call and response ordering via +// the channel, and this method will wait until the async call completes. +func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response { + // It's possible that the callback is called twice, since the callback can + // be called immediately on SetCallback() in addition to after it has been + // set. This is because completing the ReqRes happens in a separate critical + // section from the one where the callback is called: there is a race where + // SetCallback() is called between completing the ReqRes and dispatching the + // callback. + // + // We also buffer the channel with 1 response, since SetCallback() will be + // called synchronously if the reqres is already completed, in which case + // it will block on sending to the channel since it hasn't gotten around to + // receiving from it yet. + // + // ReqRes should really handle callback dispatch internally, to guarantee + // that it's only called once and avoid the above race conditions. 
+ var once sync.Once + ch := make(chan *types.Response, 1) + reqres.SetCallback(func(res *types.Response) { + once.Do(func() { + ch <- res + }) + }) + return <-ch } //---------------------------------------- -func (cli *grpcClient) FlushSync() error { +func (cli *grpcClient) FlushSync(ctx context.Context) error { return nil } -func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) { - reqres := cli.EchoAsync(msg) - // StopForError should already have been called if error is set - return reqres.Response.GetEcho(), cli.Error() +func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { + reqres, err := cli.EchoAsync(ctx, msg) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetEcho(), cli.Error() } -func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { - reqres := cli.InfoAsync(req) - return reqres.Response.GetInfo(), cli.Error() +func (cli *grpcClient) InfoSync( + ctx context.Context, + req types.RequestInfo, +) (*types.ResponseInfo, error) { + reqres, err := cli.InfoAsync(ctx, req) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetInfo(), cli.Error() } -func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { - reqres := cli.DeliverTxAsync(params) - return reqres.Response.GetDeliverTx(), cli.Error() +func (cli *grpcClient) DeliverTxSync( + ctx context.Context, + params types.RequestDeliverTx, +) (*types.ResponseDeliverTx, error) { + + reqres, err := cli.DeliverTxAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error() } -func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) { - reqres := cli.CheckTxAsync(params) - return reqres.Response.GetCheckTx(), cli.Error() +func (cli *grpcClient) CheckTxSync( + ctx context.Context, + params types.RequestCheckTx, +) 
(*types.ResponseCheckTx, error) { + + reqres, err := cli.CheckTxAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error() } -func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { - reqres := cli.QueryAsync(req) - return reqres.Response.GetQuery(), cli.Error() +func (cli *grpcClient) QuerySync( + ctx context.Context, + req types.RequestQuery, +) (*types.ResponseQuery, error) { + reqres, err := cli.QueryAsync(ctx, req) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetQuery(), cli.Error() } -func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) { - reqres := cli.CommitAsync() - return reqres.Response.GetCommit(), cli.Error() +func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { + reqres, err := cli.CommitAsync(ctx) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetCommit(), cli.Error() } -func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) { - reqres := cli.InitChainAsync(params) - return reqres.Response.GetInitChain(), cli.Error() +func (cli *grpcClient) InitChainSync( + ctx context.Context, + params types.RequestInitChain, +) (*types.ResponseInitChain, error) { + + reqres, err := cli.InitChainAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetInitChain(), cli.Error() } -func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - reqres := cli.BeginBlockAsync(params) - return reqres.Response.GetBeginBlock(), cli.Error() +func (cli *grpcClient) BeginBlockSync( + ctx context.Context, + params types.RequestBeginBlock, +) (*types.ResponseBeginBlock, error) { + + reqres, err := cli.BeginBlockAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error() } -func (cli 
*grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) { - reqres := cli.EndBlockAsync(params) - return reqres.Response.GetEndBlock(), cli.Error() +func (cli *grpcClient) EndBlockSync( + ctx context.Context, + params types.RequestEndBlock, +) (*types.ResponseEndBlock, error) { + + reqres, err := cli.EndBlockAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error() } -func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - reqres := cli.ListSnapshotsAsync(params) - return reqres.Response.GetListSnapshots(), cli.Error() +func (cli *grpcClient) ListSnapshotsSync( + ctx context.Context, + params types.RequestListSnapshots, +) (*types.ResponseListSnapshots, error) { + + reqres, err := cli.ListSnapshotsAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error() } -func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - reqres := cli.OfferSnapshotAsync(params) - return reqres.Response.GetOfferSnapshot(), cli.Error() +func (cli *grpcClient) OfferSnapshotSync( + ctx context.Context, + params types.RequestOfferSnapshot, +) (*types.ResponseOfferSnapshot, error) { + + reqres, err := cli.OfferSnapshotAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error() } func (cli *grpcClient) LoadSnapshotChunkSync( + ctx context.Context, params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - reqres := cli.LoadSnapshotChunkAsync(params) - return reqres.Response.GetLoadSnapshotChunk(), cli.Error() + + reqres, err := cli.LoadSnapshotChunkAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error() } func (cli *grpcClient) 
ApplySnapshotChunkSync( + ctx context.Context, params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - reqres := cli.ApplySnapshotChunkAsync(params) - return reqres.Response.GetApplySnapshotChunk(), cli.Error() + + reqres, err := cli.ApplySnapshotChunkAsync(ctx, params) + if err != nil { + return nil, err + } + return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error() } func (cli *grpcClient) PreprocessTxsSync( - params types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { - reqres := cli.PreprocessTxsAsync(params) + ctx context.Context, + params types.RequestPreprocessTxs, +) (*types.ResponsePreprocessTxs, error) { + reqres, err := cli.PreprocessTxsAsync(ctx, params) + if err != nil { + return nil, err + } return reqres.Response.GetPreprocessTxs(), cli.Error() } diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 4321d11ccb..e4e58ed89f 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -1,13 +1,13 @@ package abcicli import ( + "context" + types "github.com/lazyledger/lazyledger-core/abci/types" "github.com/lazyledger/lazyledger-core/libs/service" tmsync "github.com/lazyledger/lazyledger-core/libs/sync" ) -var _ Client = (*localClient)(nil) - // NOTE: use defer to unlock mutex because Application might panic (e.g., in // case of malicious tx or query). It only makes sense for publicly exposed // methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query @@ -20,6 +20,12 @@ type localClient struct { Callback } +var _ Client = (*localClient)(nil) + +// NewLocalClient creates a local client, which will be directly calling the +// methods of the given app. +// +// Both Async and Sync methods ignore the given context.Context parameter. 
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client { if mtx == nil { mtx = new(tmsync.Mutex) @@ -43,22 +49,22 @@ func (app *localClient) Error() error { return nil } -func (app *localClient) FlushAsync() *ReqRes { +func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) { // Do nothing - return newLocalReqRes(types.ToRequestFlush(), nil) + return newLocalReqRes(types.ToRequestFlush(), nil), nil } -func (app *localClient) EchoAsync(msg string) *ReqRes { +func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() return app.callback( types.ToRequestEcho(msg), types.ToResponseEcho(msg), - ) + ), nil } -func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes { +func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -66,10 +72,10 @@ func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes { return app.callback( types.ToRequestInfo(req), types.ToResponseInfo(res), - ) + ), nil } -func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes { +func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -77,10 +83,10 @@ func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes { return app.callback( types.ToRequestDeliverTx(params), types.ToResponseDeliverTx(res), - ) + ), nil } -func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes { +func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -88,10 +94,10 @@ func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes { return app.callback( types.ToRequestCheckTx(req), types.ToResponseCheckTx(res), - ) + ), nil } -func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes { +func 
(app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -99,10 +105,10 @@ func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes { return app.callback( types.ToRequestQuery(req), types.ToResponseQuery(res), - ) + ), nil } -func (app *localClient) CommitAsync() *ReqRes { +func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -110,10 +116,10 @@ func (app *localClient) CommitAsync() *ReqRes { return app.callback( types.ToRequestCommit(), types.ToResponseCommit(res), - ) + ), nil } -func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes { +func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -121,10 +127,10 @@ func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes { return app.callback( types.ToRequestInitChain(req), types.ToResponseInitChain(res), - ) + ), nil } -func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes { +func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -132,10 +138,10 @@ func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes { return app.callback( types.ToRequestBeginBlock(req), types.ToResponseBeginBlock(res), - ) + ), nil } -func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { +func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -143,10 +149,10 @@ func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { return app.callback( types.ToRequestEndBlock(req), types.ToResponseEndBlock(res), - ) + ), nil } -func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes { +func (app 
*localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -154,10 +160,10 @@ func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqR return app.callback( types.ToRequestListSnapshots(req), types.ToResponseListSnapshots(res), - ) + ), nil } -func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes { +func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -165,10 +171,13 @@ func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqR return app.callback( types.ToRequestOfferSnapshot(req), types.ToResponseOfferSnapshot(res), - ) + ), nil } -func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes { +func (app *localClient) LoadSnapshotChunkAsync( + ctx context.Context, + req types.RequestLoadSnapshotChunk, +) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -176,10 +185,13 @@ func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChun return app.callback( types.ToRequestLoadSnapshotChunk(req), types.ToResponseLoadSnapshotChunk(res), - ) + ), nil } -func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes { +func (app *localClient) ApplySnapshotChunkAsync( + ctx context.Context, + req types.RequestApplySnapshotChunk, +) (*ReqRes, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -187,10 +199,13 @@ func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotCh return app.callback( types.ToRequestApplySnapshotChunk(req), types.ToResponseApplySnapshotChunk(res), - ) + ), nil } -func (app *localClient) PreprocessTxsAsync(req types.RequestPreprocessTxs) *ReqRes { +func (app *localClient) PreprocessTxsAsync( + ctx context.Context, + req types.RequestPreprocessTxs, +) (*ReqRes, error) { app.mtx.Lock() 
defer app.mtx.Unlock() @@ -198,20 +213,20 @@ func (app *localClient) PreprocessTxsAsync(req types.RequestPreprocessTxs) *ReqR return app.callback( types.ToRequestPreprocessTxs(req), types.ToResponsePreprocessTx(res), - ) + ), nil } //------------------------------------------------------- -func (app *localClient) FlushSync() error { +func (app *localClient) FlushSync(ctx context.Context) error { return nil } -func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) { +func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { return &types.ResponseEcho{Message: msg}, nil } -func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { +func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -219,7 +234,11 @@ func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, er return &res, nil } -func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { +func (app *localClient) DeliverTxSync( + ctx context.Context, + req types.RequestDeliverTx, +) (*types.ResponseDeliverTx, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -227,7 +246,10 @@ func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.Respon return &res, nil } -func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (app *localClient) CheckTxSync( + ctx context.Context, + req types.RequestCheckTx, +) (*types.ResponseCheckTx, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -235,7 +257,10 @@ func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCh return &res, nil } -func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { +func (app *localClient) QuerySync( + ctx context.Context, + req types.RequestQuery, +) (*types.ResponseQuery, error) { app.mtx.Lock() 
defer app.mtx.Unlock() @@ -243,7 +268,7 @@ func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, return &res, nil } -func (app *localClient) CommitSync() (*types.ResponseCommit, error) { +func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -251,7 +276,11 @@ func (app *localClient) CommitSync() (*types.ResponseCommit, error) { return &res, nil } -func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { +func (app *localClient) InitChainSync( + ctx context.Context, + req types.RequestInitChain, +) (*types.ResponseInitChain, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -259,7 +288,11 @@ func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.Respon return &res, nil } -func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { +func (app *localClient) BeginBlockSync( + ctx context.Context, + req types.RequestBeginBlock, +) (*types.ResponseBeginBlock, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -267,7 +300,11 @@ func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.Resp return &res, nil } -func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { +func (app *localClient) EndBlockSync( + ctx context.Context, + req types.RequestEndBlock, +) (*types.ResponseEndBlock, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -275,7 +312,11 @@ func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.Response return &res, nil } -func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +func (app *localClient) ListSnapshotsSync( + ctx context.Context, + req types.RequestListSnapshots, +) (*types.ResponseListSnapshots, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -283,7 +324,11 @@ func (app *localClient) 
ListSnapshotsSync(req types.RequestListSnapshots) (*type return &res, nil } -func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +func (app *localClient) OfferSnapshotSync( + ctx context.Context, + req types.RequestOfferSnapshot, +) (*types.ResponseOfferSnapshot, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -292,7 +337,9 @@ func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*type } func (app *localClient) LoadSnapshotChunkSync( + ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -301,7 +348,9 @@ func (app *localClient) LoadSnapshotChunkSync( } func (app *localClient) ApplySnapshotChunkSync( + ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + app.mtx.Lock() defer app.mtx.Unlock() @@ -310,7 +359,9 @@ func (app *localClient) ApplySnapshotChunkSync( } func (app *localClient) PreprocessTxsSync( - req types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { + ctx context.Context, + req types.RequestPreprocessTxs, +) (*types.ResponsePreprocessTxs, error) { app.mtx.Lock() defer app.mtx.Unlock() diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 4392853009..5f92732a63 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -1,9 +1,12 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. 
package mocks import ( + context "context" + abcicli "github.com/lazyledger/lazyledger-core/abci/client" + log "github.com/lazyledger/lazyledger-core/libs/log" mock "github.com/stretchr/testify/mock" @@ -16,29 +19,36 @@ type Client struct { mock.Mock } -// ApplySnapshotChunkAsync provides a mock function with given fields: _a0 -func (_m *Client) ApplySnapshotChunkAsync(_a0 types.RequestApplySnapshotChunk) *abcicli.ReqRes { - ret := _m.Called(_a0) +// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// ApplySnapshotChunkSync provides a mock function with given fields: _a0 -func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - ret := _m.Called(_a0) +// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseApplySnapshotChunk - if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + r0 = 
rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) @@ -46,8 +56,8 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (* } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -55,29 +65,36 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (* return r0, r1 } -// BeginBlockAsync provides a mock function with given fields: _a0 -func (_m *Client) BeginBlockAsync(_a0 types.RequestBeginBlock) *abcicli.ReqRes { - ret := _m.Called(_a0) +// BeginBlockAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// BeginBlockSync provides a mock function with given fields: _a0 -func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - ret := _m.Called(_a0) +// BeginBlockSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseBeginBlock - if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok { - r0 = rf(_a0) + 
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseBeginBlock) @@ -85,8 +102,8 @@ func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBe } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -94,29 +111,36 @@ func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBe return r0, r1 } -// CheckTxAsync provides a mock function with given fields: _a0 -func (_m *Client) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes { - ret := _m.Called(_a0) +// CheckTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// CheckTxSync provides a mock function with given fields: _a0 -func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) { - ret := _m.Called(_a0) +// CheckTxSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseCheckTx - if rf, ok := ret.Get(0).(func(types.RequestCheckTx) 
*types.ResponseCheckTx); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseCheckTx) @@ -124,8 +148,8 @@ func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -133,29 +157,36 @@ func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, return r0, r1 } -// CommitAsync provides a mock function with given fields: -func (_m *Client) CommitAsync() *abcicli.ReqRes { - ret := _m.Called() +// CommitAsync provides a mock function with given fields: _a0 +func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// CommitSync provides a mock function with given fields: -func (_m *Client) CommitSync() (*types.ResponseCommit, error) { - ret := _m.Called() +// CommitSync provides a mock function with given fields: _a0 +func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) { + ret := _m.Called(_a0) var r0 *types.ResponseCommit - if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseCommit) @@ -163,8 +194,8 
@@ func (_m *Client) CommitSync() (*types.ResponseCommit, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -172,29 +203,36 @@ func (_m *Client) CommitSync() (*types.ResponseCommit, error) { return r0, r1 } -// DeliverTxAsync provides a mock function with given fields: _a0 -func (_m *Client) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { - ret := _m.Called(_a0) +// DeliverTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// DeliverTxSync provides a mock function with given fields: _a0 -func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { - ret := _m.Called(_a0) +// DeliverTxSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseDeliverTx - if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *types.ResponseDeliverTx); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseDeliverTx) @@ -202,8 +240,8 @@ func (_m *Client) 
DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeli } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestDeliverTx) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -211,29 +249,36 @@ func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeli return r0, r1 } -// EchoAsync provides a mock function with given fields: msg -func (_m *Client) EchoAsync(msg string) *abcicli.ReqRes { - ret := _m.Called(msg) +// EchoAsync provides a mock function with given fields: ctx, msg +func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { + ret := _m.Called(ctx, msg) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(string) *abcicli.ReqRes); ok { - r0 = rf(msg) + if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok { + r0 = rf(ctx, msg) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// EchoSync provides a mock function with given fields: msg -func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) { - ret := _m.Called(msg) +// EchoSync provides a mock function with given fields: ctx, msg +func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { + ret := _m.Called(ctx, msg) var r0 *types.ResponseEcho - if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok { - r0 = rf(msg) + if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok { + r0 = rf(ctx, msg) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseEcho) @@ -241,8 +286,8 @@ func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) { } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(msg) + if rf, ok 
:= ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, msg) } else { r1 = ret.Error(1) } @@ -250,29 +295,36 @@ func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) { return r0, r1 } -// EndBlockAsync provides a mock function with given fields: _a0 -func (_m *Client) EndBlockAsync(_a0 types.RequestEndBlock) *abcicli.ReqRes { - ret := _m.Called(_a0) +// EndBlockAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// EndBlockSync provides a mock function with given fields: _a0 -func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { - ret := _m.Called(_a0) +// EndBlockSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseEndBlock - if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseEndBlock) @@ -280,8 +332,8 @@ func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlo } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok { - r1 = 
rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -303,29 +355,36 @@ func (_m *Client) Error() error { return r0 } -// FlushAsync provides a mock function with given fields: -func (_m *Client) FlushAsync() *abcicli.ReqRes { - ret := _m.Called() +// FlushAsync provides a mock function with given fields: _a0 +func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// FlushSync provides a mock function with given fields: -func (_m *Client) FlushSync() error { - ret := _m.Called() +// FlushSync provides a mock function with given fields: _a0 +func (_m *Client) FlushSync(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -333,29 +392,36 @@ func (_m *Client) FlushSync() error { return r0 } -// InfoAsync provides a mock function with given fields: _a0 -func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes { - ret := _m.Called(_a0) +// InfoAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestInfo) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) 
} else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// InfoSync provides a mock function with given fields: _a0 -func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { - ret := _m.Called(_a0) +// InfoSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseInfo - if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseInfo) @@ -363,8 +429,8 @@ func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -372,29 +438,36 @@ func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { return r0, r1 } -// InitChainAsync provides a mock function with given fields: _a0 -func (_m *Client) InitChainAsync(_a0 types.RequestInitChain) *abcicli.ReqRes { - ret := _m.Called(_a0) +// InitChainAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestInitChain) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if 
ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// InitChainSync provides a mock function with given fields: _a0 -func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { - ret := _m.Called(_a0) +// InitChainSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseInitChain - if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseInitChain) @@ -402,8 +475,8 @@ func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInit } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -425,29 +498,36 @@ func (_m *Client) IsRunning() bool { return r0 } -// ListSnapshotsAsync provides a mock function with given fields: _a0 -func (_m *Client) ListSnapshotsAsync(_a0 types.RequestListSnapshots) *abcicli.ReqRes { - ret := _m.Called(_a0) +// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) 
*abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// ListSnapshotsSync provides a mock function with given fields: _a0 -func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - ret := _m.Called(_a0) +// ListSnapshotsSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseListSnapshots - if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseListSnapshots) @@ -455,8 +535,8 @@ func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.Resp } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -464,29 +544,36 @@ func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.Resp return r0, r1 } -// LoadSnapshotChunkAsync provides a mock function with given fields: _a0 -func (_m *Client) LoadSnapshotChunkAsync(_a0 types.RequestLoadSnapshotChunk) *abcicli.ReqRes { - ret := _m.Called(_a0) +// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 
*abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// LoadSnapshotChunkSync provides a mock function with given fields: _a0 -func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - ret := _m.Called(_a0) +// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseLoadSnapshotChunk - if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) @@ -494,8 +581,8 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*ty } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -503,29 +590,36 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*ty return r0, r1 } -// OfferSnapshotAsync provides a mock function with given fields: _a0 -func (_m *Client) OfferSnapshotAsync(_a0 types.RequestOfferSnapshot) *abcicli.ReqRes { - ret := 
_m.Called(_a0) +// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// OfferSnapshotSync provides a mock function with given fields: _a0 -func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - ret := _m.Called(_a0) +// OfferSnapshotSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseOfferSnapshot - if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseOfferSnapshot) @@ -533,8 +627,8 @@ func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.Resp } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -575,29 +669,36 @@ func (_m *Client) OnStop() { _m.Called() } -// PreprocessTxsAsync provides a mock function with 
given fields: _a0 -func (_m *Client) PreprocessTxsAsync(_a0 types.RequestPreprocessTxs) *abcicli.ReqRes { - ret := _m.Called(_a0) +// PreprocessTxsAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) PreprocessTxsAsync(_a0 context.Context, _a1 types.RequestPreprocessTxs) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestPreprocessTxs) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPreprocessTxs) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPreprocessTxs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// PreprocessTxsSync provides a mock function with given fields: _a0 -func (_m *Client) PreprocessTxsSync(_a0 types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { - ret := _m.Called(_a0) +// PreprocessTxsSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) PreprocessTxsSync(_a0 context.Context, _a1 types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponsePreprocessTxs - if rf, ok := ret.Get(0).(func(types.RequestPreprocessTxs) *types.ResponsePreprocessTxs); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPreprocessTxs) *types.ResponsePreprocessTxs); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponsePreprocessTxs) @@ -605,8 +706,8 @@ func (_m *Client) PreprocessTxsSync(_a0 types.RequestPreprocessTxs) (*types.Resp } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestPreprocessTxs) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPreprocessTxs) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ 
-614,29 +715,36 @@ func (_m *Client) PreprocessTxsSync(_a0 types.RequestPreprocessTxs) (*types.Resp return r0, r1 } -// QueryAsync provides a mock function with given fields: _a0 -func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes { - ret := _m.Called(_a0) +// QueryAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestQuery) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// QuerySync provides a mock function with given fields: _a0 -func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) { - ret := _m.Called(_a0) +// QuerySync provides a mock function with given fields: _a0, _a1 +func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseQuery - if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseQuery) @@ -644,8 +752,8 @@ func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } diff --git 
a/abci/client/socket_client.go b/abci/client/socket_client.go index 364be0f1be..cd61fa2960 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -3,6 +3,7 @@ package abcicli import ( "bufio" "container/list" + "context" "errors" "fmt" "io" @@ -18,10 +19,18 @@ import ( ) const ( - reqQueueSize = 256 // TODO make configurable - flushThrottleMS = 20 // Don't wait longer than... + // reqQueueSize is the max number of queued async requests. + // (memory: 256MB max assuming 1MB transactions) + reqQueueSize = 256 + // Don't wait longer than... + flushThrottleMS = 20 ) +type reqResWithContext struct { + R *ReqRes + C context.Context // if context.Err is not nil, reqRes will be thrown away (ignored) +} + // This is goroutine-safe, but users should beware that the application in // general is not meant to be interfaced with concurrent callers. type socketClient struct { @@ -31,7 +40,7 @@ type socketClient struct { mustConnect bool conn net.Conn - reqQueue chan *ReqRes + reqQueue chan *reqResWithContext flushTimer *timer.ThrottleTimer mtx tmsync.Mutex @@ -47,7 +56,7 @@ var _ Client = (*socketClient)(nil) // if it fails to connect. 
func NewSocketClient(addr string, mustConnect bool) Client { cli := &socketClient{ - reqQueue: make(chan *ReqRes, reqQueueSize), + reqQueue: make(chan *reqResWithContext, reqQueueSize), flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS), mustConnect: mustConnect, @@ -123,15 +132,20 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { case reqres := <-cli.reqQueue: // cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request) - cli.willSendReq(reqres) - err := types.WriteMessage(reqres.Request, w) + if reqres.C.Err() != nil { + cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err()) + continue + } + + cli.willSendReq(reqres.R) + err := types.WriteMessage(reqres.R.Request, w) if err != nil { cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return } // If it's a flush request, flush the current buffer. - if _, ok := reqres.Request.Value.(*types.Request_Flush); ok { + if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok { err = w.Flush() if err != nil { cli.stopForError(fmt.Errorf("flush buffer: %w", err)) @@ -140,7 +154,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { } case <-cli.flushTimer.Ch: // flush queue select { - case cli.reqQueue <- NewReqRes(types.ToRequestFlush()): + case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}: default: // Probably will fill the buffer, or retry later. 
} @@ -221,199 +235,250 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { //---------------------------------------- -func (cli *socketClient) EchoAsync(msg string) *ReqRes { - return cli.queueRequest(types.ToRequestEcho(msg)) +func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg)) } -func (cli *socketClient) FlushAsync() *ReqRes { - return cli.queueRequest(types.ToRequestFlush()) +func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestFlush()) } -func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes { - return cli.queueRequest(types.ToRequestInfo(req)) +func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestInfo(req)) } -func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes { - return cli.queueRequest(types.ToRequestDeliverTx(req)) +func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req)) } -func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes { - return cli.queueRequest(types.ToRequestCheckTx(req)) +func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req)) } -func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes { - return cli.queueRequest(types.ToRequestQuery(req)) +func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestQuery(req)) } -func (cli *socketClient) CommitAsync() *ReqRes { - return cli.queueRequest(types.ToRequestCommit()) +func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, 
error) { + return cli.queueRequestAsync(ctx, types.ToRequestCommit()) } -func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes { - return cli.queueRequest(types.ToRequestInitChain(req)) +func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req)) } -func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes { - return cli.queueRequest(types.ToRequestBeginBlock(req)) +func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req)) } -func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { - return cli.queueRequest(types.ToRequestEndBlock(req)) +func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req)) } -func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes { - return cli.queueRequest(types.ToRequestListSnapshots(req)) +func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req)) } -func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes { - return cli.queueRequest(types.ToRequestOfferSnapshot(req)) +func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req)) } -func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes { - return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req)) +func (cli *socketClient) LoadSnapshotChunkAsync( + ctx context.Context, + req types.RequestLoadSnapshotChunk, +) (*ReqRes, error) { + return 
cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req)) } -func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes { - return cli.queueRequest(types.ToRequestApplySnapshotChunk(req)) +func (cli *socketClient) ApplySnapshotChunkAsync( + ctx context.Context, + req types.RequestApplySnapshotChunk, +) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req)) } -func (cli *socketClient) PreprocessTxsAsync(req types.RequestPreprocessTxs) *ReqRes { - return cli.queueRequest(types.ToRequestPreprocessTxs(req)) +func (cli *socketClient) PreprocessTxsAsync( + ctx context.Context, + req types.RequestPreprocessTxs, +) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, types.ToRequestPreprocessTxs(req)) } //---------------------------------------- -func (cli *socketClient) FlushSync() error { - reqRes := cli.queueRequest(types.ToRequestFlush()) +func (cli *socketClient) FlushSync(ctx context.Context) error { + reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true) + if err != nil { + return queueErr(err) + } + if err := cli.Error(); err != nil { return err } - reqRes.Wait() // NOTE: if we don't flush the queue, its possible to get stuck here - return cli.Error() + + gotResp := make(chan struct{}) + go func() { + // NOTE: if we don't flush the queue, its possible to get stuck here + reqRes.Wait() + close(gotResp) + }() + + select { + case <-gotResp: + return cli.Error() + case <-ctx.Done(): + return ctx.Err() + } } -func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) { - reqres := cli.queueRequest(types.ToRequestEcho(msg)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg)) + if err != nil { return nil, err } - - return reqres.Response.GetEcho(), cli.Error() + return reqres.Response.GetEcho(), 
nil } -func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { - reqres := cli.queueRequest(types.ToRequestInfo(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) InfoSync( + ctx context.Context, + req types.RequestInfo, +) (*types.ResponseInfo, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req)) + if err != nil { return nil, err } - - return reqres.Response.GetInfo(), cli.Error() + return reqres.Response.GetInfo(), nil } -func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { - reqres := cli.queueRequest(types.ToRequestDeliverTx(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) DeliverTxSync( + ctx context.Context, + req types.RequestDeliverTx, +) (*types.ResponseDeliverTx, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req)) + if err != nil { return nil, err } - - return reqres.Response.GetDeliverTx(), cli.Error() + return reqres.Response.GetDeliverTx(), nil } -func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { - reqres := cli.queueRequest(types.ToRequestCheckTx(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) CheckTxSync( + ctx context.Context, + req types.RequestCheckTx, +) (*types.ResponseCheckTx, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req)) + if err != nil { return nil, err } - - return reqres.Response.GetCheckTx(), cli.Error() + return reqres.Response.GetCheckTx(), nil } -func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { - reqres := cli.queueRequest(types.ToRequestQuery(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) QuerySync( + ctx context.Context, + req types.RequestQuery, +) (*types.ResponseQuery, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, 
types.ToRequestQuery(req)) + if err != nil { return nil, err } - - return reqres.Response.GetQuery(), cli.Error() + return reqres.Response.GetQuery(), nil } -func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) { - reqres := cli.queueRequest(types.ToRequestCommit()) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit()) + if err != nil { return nil, err } - - return reqres.Response.GetCommit(), cli.Error() + return reqres.Response.GetCommit(), nil } -func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { - reqres := cli.queueRequest(types.ToRequestInitChain(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) InitChainSync( + ctx context.Context, + req types.RequestInitChain, +) (*types.ResponseInitChain, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req)) + if err != nil { return nil, err } - - return reqres.Response.GetInitChain(), cli.Error() + return reqres.Response.GetInitChain(), nil } -func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - reqres := cli.queueRequest(types.ToRequestBeginBlock(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) BeginBlockSync( + ctx context.Context, + req types.RequestBeginBlock, +) (*types.ResponseBeginBlock, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req)) + if err != nil { return nil, err } - - return reqres.Response.GetBeginBlock(), cli.Error() + return reqres.Response.GetBeginBlock(), nil } -func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { - reqres := cli.queueRequest(types.ToRequestEndBlock(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) EndBlockSync( + 
ctx context.Context, + req types.RequestEndBlock, +) (*types.ResponseEndBlock, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req)) + if err != nil { return nil, err } - - return reqres.Response.GetEndBlock(), cli.Error() + return reqres.Response.GetEndBlock(), nil } -func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - reqres := cli.queueRequest(types.ToRequestListSnapshots(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) ListSnapshotsSync( + ctx context.Context, + req types.RequestListSnapshots, +) (*types.ResponseListSnapshots, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req)) + if err != nil { return nil, err } - - return reqres.Response.GetListSnapshots(), cli.Error() + return reqres.Response.GetListSnapshots(), nil } -func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req)) - if err := cli.FlushSync(); err != nil { +func (cli *socketClient) OfferSnapshotSync( + ctx context.Context, + req types.RequestOfferSnapshot, +) (*types.ResponseOfferSnapshot, error) { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req)) + if err != nil { return nil, err } - - return reqres.Response.GetOfferSnapshot(), cli.Error() + return reqres.Response.GetOfferSnapshot(), nil } func (cli *socketClient) LoadSnapshotChunkSync( + ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req)) - if err := cli.FlushSync(); err != nil { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req)) + if err != nil { return nil, err } - - return reqres.Response.GetLoadSnapshotChunk(), cli.Error() + return 
reqres.Response.GetLoadSnapshotChunk(), nil } func (cli *socketClient) ApplySnapshotChunkSync( + ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req)) - if err := cli.FlushSync(); err != nil { + + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req)) + if err != nil { return nil, err } - return reqres.Response.GetApplySnapshotChunk(), cli.Error() + return reqres.Response.GetApplySnapshotChunk(), nil } func (cli *socketClient) PreprocessTxsSync( + ctx context.Context, req types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { - reqres := cli.queueRequest(types.ToRequestPreprocessTxs(req)) - if err := cli.FlushSync(); err != nil { + reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestPreprocessTxs(req)) + if err != nil { return nil, err } return reqres.Response.GetPreprocessTxs(), cli.Error() @@ -421,11 +486,30 @@ func (cli *socketClient) PreprocessTxsSync( //---------------------------------------- -func (cli *socketClient) queueRequest(req *types.Request) *ReqRes { +// queueRequest enqueues req onto the queue. If the queue is full, it ether +// returns an error (sync=false) or blocks (sync=true). +// +// When sync=true, ctx can be used to break early. When sync=false, ctx will be +// used later to determine if request should be dropped (if ctx.Err is +// non-nil). +// +// The caller is responsible for checking cli.Error. 
+func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) { reqres := NewReqRes(req) - // TODO: set cli.err if reqQueue times out - cli.reqQueue <- reqres + if sync { + select { + case cli.reqQueue <- &reqResWithContext{R: reqres, C: context.Background()}: + case <-ctx.Done(): + return nil, ctx.Err() + } + } else { + select { + case cli.reqQueue <- &reqResWithContext{R: reqres, C: ctx}: + default: + return nil, errors.New("buffer is full") + } + } // Maybe auto-flush, or unset auto-flush switch req.Value.(type) { @@ -435,7 +519,41 @@ func (cli *socketClient) queueRequest(req *types.Request) *ReqRes { cli.flushTimer.Set() } - return reqres + return reqres, nil +} + +func (cli *socketClient) queueRequestAsync( + ctx context.Context, + req *types.Request, +) (*ReqRes, error) { + + reqres, err := cli.queueRequest(ctx, req, false) + if err != nil { + return nil, queueErr(err) + } + + return reqres, cli.Error() +} + +func (cli *socketClient) queueRequestAndFlushSync( + ctx context.Context, + req *types.Request, +) (*ReqRes, error) { + + reqres, err := cli.queueRequest(ctx, req, true) + if err != nil { + return nil, queueErr(err) + } + + if err := cli.FlushSync(ctx); err != nil { + return nil, err + } + + return reqres, cli.Error() +} + +func queueErr(e error) error { + return fmt.Errorf("can't queue req: %w", e) } func (cli *socketClient) flushQueue() { @@ -453,7 +571,7 @@ LOOP: for { select { case reqres := <-cli.reqQueue: - reqres.Done() + reqres.R.Done() default: break LOOP } @@ -504,12 +622,10 @@ func (cli *socketClient) stopForError(err error) { } cli.mtx.Lock() - if cli.err == nil { - cli.err = err - } + cli.err = err cli.mtx.Unlock() - cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error())) + cli.Logger.Info("Stopping abci.socketClient", "reason", err) if err := cli.Stop(); err != nil { cli.Logger.Error("Error stopping abci.socketClient", "err", err) } diff --git 
a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index 924c83f1c2..814c5f0d20 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -1,6 +1,7 @@ package abcicli_test import ( + "context" "fmt" "testing" "time" @@ -15,6 +16,8 @@ import ( "github.com/lazyledger/lazyledger-core/libs/service" ) +var ctx = context.Background() + func TestProperSyncCalls(t *testing.T) { app := slowApp{} @@ -33,11 +36,12 @@ func TestProperSyncCalls(t *testing.T) { resp := make(chan error, 1) go func() { // This is BeginBlockSync unrolled.... - reqres := c.BeginBlockAsync(types.RequestBeginBlock{}) - err := c.FlushSync() - require.NoError(t, err) + reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) + assert.NoError(t, err) + err = c.FlushSync(context.Background()) + assert.NoError(t, err) res := reqres.Response.GetBeginBlock() - require.NotNil(t, res) + assert.NotNil(t, res) resp <- c.Error() }() @@ -68,14 +72,16 @@ func TestHangingSyncCalls(t *testing.T) { resp := make(chan error, 1) go func() { // Start BeginBlock and flush it - reqres := c.BeginBlockAsync(types.RequestBeginBlock{}) - flush := c.FlushAsync() + reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) + assert.NoError(t, err) + flush, err := c.FlushAsync(ctx) + assert.NoError(t, err) // wait 20 ms for all events to travel socket, but // no response yet from server time.Sleep(20 * time.Millisecond) // kill the server, so the connections break - err := s.Stop() - require.NoError(t, err) + err = s.Stop() + assert.NoError(t, err) // wait for the response from BeginBlock reqres.Wait() diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 4916e481e5..17e9f037c4 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -2,6 +2,7 @@ package main import ( "bufio" + "context" "encoding/hex" "errors" "fmt" @@ -28,6 +29,8 @@ import ( var ( client abcicli.Client logger log.Logger + + ctx = 
context.Background() ) // flags @@ -462,7 +465,7 @@ func cmdEcho(cmd *cobra.Command, args []string) error { if len(args) > 0 { msg = args[0] } - res, err := client.EchoSync(msg) + res, err := client.EchoSync(ctx, msg) if err != nil { return err } @@ -478,7 +481,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { if len(args) == 1 { version = args[0] } - res, err := client.InfoSync(types.RequestInfo{Version: version}) + res, err := client.InfoSync(ctx, types.RequestInfo{Version: version}) if err != nil { return err } @@ -503,7 +506,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes}) + res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) if err != nil { return err } @@ -529,7 +532,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes}) + res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) if err != nil { return err } @@ -544,7 +547,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { // Get application Merkle root hash func cmdCommit(cmd *cobra.Command, args []string) error { - res, err := client.CommitSync() + res, err := client.CommitSync(ctx) if err != nil { return err } @@ -569,7 +572,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return err } - resQuery, err := client.QuerySync(types.RequestQuery{ + resQuery, err := client.QuerySync(ctx, types.RequestQuery{ Data: queryBytes, Path: flagPath, Height: int64(flagHeight), diff --git a/abci/example/example_test.go b/abci/example/example_test.go index d5b76f7e28..b969f9bd8f 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -1,6 +1,7 @@ package example import ( + "context" "fmt" "math/rand" "net" @@ -10,11 +11,8 @@ import ( "time" "github.com/stretchr/testify/require" - 
"google.golang.org/grpc" - "golang.org/x/net/context" - "github.com/lazyledger/lazyledger-core/libs/log" tmnet "github.com/lazyledger/lazyledger-core/libs/net" @@ -45,7 +43,7 @@ func TestGRPC(t *testing.T) { } func testStream(t *testing.T, app types.Application) { - numDeliverTxs := 20000 + const numDeliverTxs = 20000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) @@ -53,9 +51,8 @@ func testStream(t *testing.T, app types.Application) { // Start the listener server := abciserver.NewSocketServer(socket, app) server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - require.NoError(t, err, "Error starting socket server") - } + err := server.Start() + require.NoError(t, err) t.Cleanup(func() { if err := server.Stop(); err != nil { t.Error(err) @@ -65,9 +62,8 @@ func testStream(t *testing.T, app types.Application) { // Connect to the socket client := abcicli.NewSocketClient(socket, false) client.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := client.Start(); err != nil { - t.Fatalf("Error starting socket client: %v", err.Error()) - } + err = client.Start() + require.NoError(t, err) t.Cleanup(func() { if err := client.Stop(); err != nil { t.Error(err) @@ -101,22 +97,24 @@ func testStream(t *testing.T, app types.Application) { } }) + ctx := context.Background() + // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request - reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")}) - _ = reqRes - // check err ? + _, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")}) + require.NoError(t, err) // Sometimes send flush messages - if counter%123 == 0 { - client.FlushAsync() - // check err ? 
+ if counter%128 == 0 { + err = client.FlushSync(context.Background()) + require.NoError(t, err) } } // Send final flush message - client.FlushAsync() + _, err = client.FlushAsync(ctx) + require.NoError(t, err) <-done } diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 106be48fe1..61df7b8398 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -10,7 +10,7 @@ import ( func RandVal(i int) types.ValidatorUpdate { pubkey := tmrand.Bytes(32) power := tmrand.Uint16() + 1 - v := types.Ed25519ValidatorUpdate(pubkey, int64(power)) + v := types.UpdateValidator(pubkey, int64(power), "") return v } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 49dbd61d7b..f17ede647e 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -1,6 +1,7 @@ package kvstore import ( + "context" "fmt" "io/ioutil" "sort" @@ -23,6 +24,8 @@ const ( testValue = "def" ) +var ctx = context.Background() + func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { req := types.RequestDeliverTx{Tx: tx} ar := app.DeliverTx(req) @@ -323,23 +326,23 @@ func runClientTests(t *testing.T, client abcicli.Client) { } func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) { - ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx}) + ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) // repeating tx doesn't raise error - ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx}) + ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) // commit - _, err = app.CommitSync() + _, err = app.CommitSync(ctx) require.NoError(t, err) - info, err := app.InfoSync(types.RequestInfo{}) + info, err := app.InfoSync(ctx, types.RequestInfo{}) require.NoError(t, err) require.NotZero(t, 
info.LastBlockHeight) // make sure query is fine - resQuery, err := app.QuerySync(types.RequestQuery{ + resQuery, err := app.QuerySync(ctx, types.RequestQuery{ Path: "/store", Data: []byte(key), }) @@ -350,7 +353,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine - resQuery, err = app.QuerySync(types.RequestQuery{ + resQuery, err = app.QuerySync(ctx, types.RequestQuery{ Path: "/store", Data: []byte(key), Prove: true, diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index e06efb4c37..0a464aaddf 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -239,7 +239,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon } // update - return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, power)) + return app.updateValidator(types.UpdateValidator(pubkey, power, "")) } // add, update, or remove a validator diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 6fa2696a3a..db386d8d63 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -2,6 +2,7 @@ package testsuite import ( "bytes" + "context" "errors" "fmt" @@ -10,15 +11,17 @@ import ( tmrand "github.com/lazyledger/lazyledger-core/libs/rand" ) +var ctx = context.Background() + func InitChain(client abcicli.Client) error { total := 10 vals := make([]types.ValidatorUpdate, total) for i := 0; i < total; i++ { pubkey := tmrand.Bytes(33) power := tmrand.Int() - vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power)) + vals[i] = types.UpdateValidator(pubkey, int64(power), "") } - _, err := client.InitChainSync(types.RequestInitChain{ + _, err := client.InitChainSync(ctx, types.RequestInitChain{ Validators: vals, }) if err != nil { @@ -30,7 +33,7 @@ func InitChain(client abcicli.Client) error { } func 
Commit(client abcicli.Client, hashExp []byte) error { - res, err := client.CommitSync() + res, err := client.CommitSync(ctx) data := res.Data if err != nil { fmt.Println("Failed test: Commit") @@ -47,7 +50,7 @@ func Commit(client abcicli.Client, hashExp []byte) error { } func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes}) + res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: DeliverTx") @@ -66,7 +69,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp [] } func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes}) + res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: CheckTx") diff --git a/abci/tests/test_app/app.go b/abci/tests/test_app/app.go index f26e079f66..1d8ccd8df7 100644 --- a/abci/tests/test_app/app.go +++ b/abci/tests/test_app/app.go @@ -2,6 +2,7 @@ package main import ( "bytes" + "context" "fmt" "os" @@ -10,6 +11,8 @@ import ( "github.com/lazyledger/lazyledger-core/libs/log" ) +var ctx = context.Background() + func startClient(abciType string) abcicli.Client { // Start client client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true) @@ -26,7 +29,7 @@ func startClient(abciType string) abcicli.Client { } func commit(client abcicli.Client, hashExp []byte) { - res, err := client.CommitSync() + res, err := client.CommitSync(ctx) if err != nil { panicf("client error: %v", err) } @@ -36,7 +39,7 @@ func commit(client abcicli.Client, hashExp []byte) { } func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) { - res, err := 
client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes}) + res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) if err != nil { panicf("client error: %v", err) } diff --git a/abci/types/application.go b/abci/types/application.go index 04dd5dd9b9..4a46351a2b 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -1,7 +1,7 @@ package types import ( - context "golang.org/x/net/context" + "context" ) // Application is an interface that enables any finite, deterministic state machine diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go index 2493c73ac6..64a2577b5a 100644 --- a/abci/types/pubkey.go +++ b/abci/types/pubkey.go @@ -1,16 +1,16 @@ package types import ( + fmt "fmt" + "github.com/lazyledger/lazyledger-core/crypto/ed25519" cryptoenc "github.com/lazyledger/lazyledger-core/crypto/encoding" -) - -const ( - PubKeyEd25519 = "ed25519" + "github.com/lazyledger/lazyledger-core/crypto/secp256k1" ) func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate { pke := ed25519.PubKey(pk) + pkp, err := cryptoenc.PubKeyToProto(pke) if err != nil { panic(err) @@ -22,3 +22,23 @@ func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate { Power: power, } } + +func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { + switch keyType { + case "", ed25519.KeyType: + return Ed25519ValidatorUpdate(pk, power) + case secp256k1.KeyType: + pke := secp256k1.PubKey(pk) + pkp, err := cryptoenc.PubKeyToProto(pke) + if err != nil { + panic(err) + } + return ValidatorUpdate{ + // Address: + PubKey: pkp, + Power: power, + } + default: + panic(fmt.Sprintf("key type %s not supported", keyType)) + } +} diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 4a48f1ca6a..ca723b42e0 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -498,6 +498,7 @@ type RequestInfo struct { Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` 
BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` + AbciVersion string `protobuf:"bytes,4,opt,name=abci_version,json=abciVersion,proto3" json:"abci_version,omitempty"` } func (m *RequestInfo) Reset() { *m = RequestInfo{} } @@ -554,6 +555,13 @@ func (m *RequestInfo) GetP2PVersion() uint64 { return 0 } +func (m *RequestInfo) GetAbciVersion() string { + if m != nil { + return m.AbciVersion + } + return "" +} + type RequestInitChain struct { Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` @@ -1599,7 +1607,8 @@ func (m *ResponseFlush) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo type ResponseInfo struct { - Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // this is the software version of the application. TODO: remove? 
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` @@ -3180,181 +3189,182 @@ func init() { func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } var fileDescriptor_252557cfdd89a31a = []byte{ - // 2774 bytes of a gzipped FileDescriptorProto + // 2788 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x77, 0x23, 0xc5, - 0x11, 0xd7, 0xf7, 0x47, 0xd9, 0x92, 0xe5, 0xde, 0x65, 0x11, 0xc3, 0x62, 0x2f, 0xc3, 0x83, 0x2c, - 0x1b, 0xb0, 0xc1, 0x3c, 0x36, 0xf0, 0x20, 0x01, 0x4b, 0x68, 0x91, 0x59, 0x63, 0x3b, 0x6d, 0xed, - 0x92, 0x2f, 0x76, 0x68, 0xcd, 0xb4, 0xa5, 0x61, 0xa5, 0x99, 0x61, 0x66, 0x64, 0x6c, 0x8e, 0xf9, + 0x11, 0xd7, 0xe8, 0x5b, 0x25, 0x4b, 0x96, 0x7b, 0x97, 0x45, 0x88, 0xc5, 0x5e, 0x86, 0x07, 0x59, + 0x36, 0x60, 0x83, 0x79, 0x6c, 0xe0, 0x41, 0x02, 0x96, 0xd0, 0x22, 0xb3, 0xc6, 0x76, 0xda, 0xda, + 0x25, 0x5f, 0xec, 0xd0, 0xd2, 0xb4, 0xa5, 0x61, 0xa5, 0x99, 0x61, 0x66, 0x64, 0x6c, 0x8e, 0xf9, 0xb8, 0x90, 0x43, 0x38, 0xe6, 0xc2, 0xff, 0x91, 0x53, 0x2e, 0xb9, 0xf0, 0x5e, 0x2e, 0x1c, 0x73, - 0x22, 0x79, 0xec, 0x2d, 0xb7, 0x9c, 0x72, 0xca, 0x4b, 0x5e, 0x7f, 0xcc, 0x97, 0xa4, 0xb1, 0xe4, + 0x22, 0x79, 0xec, 0x2d, 0xb7, 0x9c, 0x72, 0xca, 0x4b, 0x5e, 0x7f, 0xcc, 0x97, 0xa4, 0x91, 0xe4, 0x90, 0x5b, 0x6e, 0xdd, 0x35, 0x55, 0xa5, 0xe9, 0x9a, 0xee, 0x5f, 0xfd, 0xaa, 0x5a, 0xf0, 0xa4, - 0x4f, 0x2d, 0x83, 0xba, 0x63, 0xd3, 0xf2, 0xb7, 0x49, 0x5f, 0x37, 0xb7, 0xfd, 0x73, 0x87, 0x7a, - 0x5b, 0x8e, 0x6b, 0xfb, 0x36, 0x5a, 0x8b, 0x1e, 0x6e, 0xb1, 0x87, 0xca, 0x53, 0x31, 0x6d, 0xdd, - 0x3d, 0x77, 0x7c, 0x7b, 0xdb, 0x71, 0x6d, 0xfb, 0x44, 0xe8, 0x2b, 0xd7, 0x63, 0x8f, 0xb9, 0x9f, - 0xb8, 0xb7, 
0xc4, 0x53, 0x69, 0xfc, 0x90, 0x9e, 0x07, 0x4f, 0x9f, 0x9a, 0xb1, 0x75, 0x88, 0x4b, - 0xc6, 0xc1, 0xe3, 0xcd, 0x81, 0x6d, 0x0f, 0x46, 0x74, 0x9b, 0xcf, 0xfa, 0x93, 0x93, 0x6d, 0xdf, - 0x1c, 0x53, 0xcf, 0x27, 0x63, 0x47, 0x2a, 0x5c, 0x1d, 0xd8, 0x03, 0x9b, 0x0f, 0xb7, 0xd9, 0x48, - 0x48, 0xd5, 0x2f, 0x2b, 0x50, 0xc6, 0xf4, 0x93, 0x09, 0xf5, 0x7c, 0xb4, 0x03, 0x05, 0xaa, 0x0f, - 0xed, 0x66, 0xf6, 0x46, 0xf6, 0xe6, 0xca, 0xce, 0xf5, 0xad, 0xa9, 0xc5, 0x6d, 0x49, 0xbd, 0x8e, - 0x3e, 0xb4, 0xbb, 0x19, 0xcc, 0x75, 0xd1, 0xab, 0x50, 0x3c, 0x19, 0x4d, 0xbc, 0x61, 0x33, 0xc7, - 0x8d, 0x9e, 0x4a, 0x33, 0xba, 0xc3, 0x94, 0xba, 0x19, 0x2c, 0xb4, 0xd9, 0x4f, 0x99, 0xd6, 0x89, - 0xdd, 0xcc, 0x5f, 0xfc, 0x53, 0x7b, 0xd6, 0x09, 0xff, 0x29, 0xa6, 0x8b, 0x5a, 0x00, 0xa6, 0x65, - 0xfa, 0x9a, 0x3e, 0x24, 0xa6, 0xd5, 0x2c, 0x70, 0xcb, 0xa7, 0xd3, 0x2d, 0x4d, 0xbf, 0xcd, 0x14, - 0xbb, 0x19, 0x5c, 0x35, 0x83, 0x09, 0x7b, 0xdd, 0x4f, 0x26, 0xd4, 0x3d, 0x6f, 0x16, 0x2f, 0x7e, - 0xdd, 0x1f, 0x33, 0x25, 0xf6, 0xba, 0x5c, 0x1b, 0x75, 0x60, 0xa5, 0x4f, 0x07, 0xa6, 0xa5, 0xf5, - 0x47, 0xb6, 0xfe, 0xb0, 0x59, 0xe2, 0xc6, 0x6a, 0x9a, 0x71, 0x8b, 0xa9, 0xb6, 0x98, 0x66, 0x37, - 0x83, 0xa1, 0x1f, 0xce, 0xd0, 0x9b, 0x50, 0xd1, 0x87, 0x54, 0x7f, 0xa8, 0xf9, 0x67, 0xcd, 0x32, - 0xf7, 0xb1, 0x99, 0xe6, 0xa3, 0xcd, 0xf4, 0x7a, 0x67, 0xdd, 0x0c, 0x2e, 0xeb, 0x62, 0xc8, 0xd6, - 0x6f, 0xd0, 0x91, 0x79, 0x4a, 0x5d, 0x66, 0x5f, 0xb9, 0x78, 0xfd, 0xef, 0x08, 0x4d, 0xee, 0xa1, - 0x6a, 0x04, 0x13, 0xf4, 0x16, 0x54, 0xa9, 0x65, 0xc8, 0x65, 0x54, 0xb9, 0x8b, 0x1b, 0xa9, 0xdf, - 0xd9, 0x32, 0x82, 0x45, 0x54, 0xa8, 0x1c, 0xa3, 0xd7, 0xa0, 0xa4, 0xdb, 0xe3, 0xb1, 0xe9, 0x37, - 0x81, 0x5b, 0x6f, 0xa4, 0x2e, 0x80, 0x6b, 0x75, 0x33, 0x58, 0xea, 0xa3, 0x03, 0xa8, 0x8f, 0x4c, - 0xcf, 0xd7, 0x3c, 0x8b, 0x38, 0xde, 0xd0, 0xf6, 0xbd, 0xe6, 0x0a, 0xf7, 0xf0, 0x6c, 0x9a, 0x87, - 0x7d, 0xd3, 0xf3, 0x8f, 0x03, 0xe5, 0x6e, 0x06, 0xd7, 0x46, 0x71, 0x01, 0xf3, 0x67, 0x9f, 0x9c, - 0x50, 0x37, 0x74, 0xd8, 0x5c, 0xbd, 0xd8, 0xdf, 
0x21, 0xd3, 0x0e, 0xec, 0x99, 0x3f, 0x3b, 0x2e, - 0x40, 0x3f, 0x87, 0x2b, 0x23, 0x9b, 0x18, 0xa1, 0x3b, 0x4d, 0x1f, 0x4e, 0xac, 0x87, 0xcd, 0x1a, - 0x77, 0xfa, 0x7c, 0xea, 0x4b, 0xda, 0xc4, 0x08, 0x5c, 0xb4, 0x99, 0x41, 0x37, 0x83, 0xd7, 0x47, - 0xd3, 0x42, 0xf4, 0x00, 0xae, 0x12, 0xc7, 0x19, 0x9d, 0x4f, 0x7b, 0xaf, 0x73, 0xef, 0xb7, 0xd2, - 0xbc, 0xef, 0x32, 0x9b, 0x69, 0xf7, 0x88, 0xcc, 0x48, 0x59, 0x30, 0x1c, 0x97, 0x3a, 0xae, 0xad, - 0x53, 0xcf, 0xd3, 0xfc, 0x33, 0xaf, 0xb9, 0x76, 0x71, 0x30, 0x8e, 0x42, 0xed, 0xde, 0x19, 0x0f, - 0xae, 0x13, 0x17, 0xb4, 0xca, 0x50, 0x3c, 0x25, 0xa3, 0x09, 0x55, 0xbf, 0x07, 0x2b, 0xb1, 0x63, - 0x8f, 0x9a, 0x50, 0x1e, 0x53, 0xcf, 0x23, 0x03, 0xca, 0x51, 0xa2, 0x8a, 0x83, 0xa9, 0x5a, 0x87, - 0xd5, 0xf8, 0x51, 0x57, 0xc7, 0xa1, 0x21, 0x3b, 0xc4, 0xcc, 0xf0, 0x94, 0xba, 0x9e, 0x69, 0x5b, - 0x81, 0xa1, 0x9c, 0xa2, 0x67, 0xa0, 0xc6, 0xb7, 0xa3, 0x16, 0x3c, 0x67, 0x48, 0x52, 0xc0, 0xab, - 0x5c, 0x78, 0x5f, 0x2a, 0x6d, 0xc2, 0x8a, 0xb3, 0xe3, 0x84, 0x2a, 0x79, 0xae, 0x02, 0xce, 0x8e, - 0x23, 0x15, 0xd4, 0x3f, 0xe7, 0xa0, 0x31, 0x7d, 0xf4, 0xd1, 0x6b, 0x50, 0x60, 0x28, 0x28, 0x01, - 0x4d, 0xd9, 0x12, 0x10, 0xb9, 0x15, 0x40, 0xe4, 0x56, 0x2f, 0x80, 0xc8, 0x56, 0xe5, 0xab, 0x6f, - 0x36, 0x33, 0x5f, 0xfc, 0x75, 0x33, 0x8b, 0xb9, 0x05, 0x7a, 0x82, 0x9d, 0x54, 0x62, 0x5a, 0x9a, - 0x69, 0xf0, 0xf7, 0xa9, 0xb2, 0x63, 0x48, 0x4c, 0x6b, 0xcf, 0x40, 0x77, 0xa1, 0xa1, 0xdb, 0x96, - 0x47, 0x2d, 0x6f, 0xe2, 0x69, 0x02, 0x82, 0x25, 0x8c, 0xcd, 0x9e, 0xa4, 0x76, 0xa0, 0x78, 0xc4, - 0xf5, 0xf0, 0x9a, 0x9e, 0x14, 0xa0, 0x3b, 0x00, 0xa7, 0x64, 0x64, 0x1a, 0xc4, 0xb7, 0x5d, 0xaf, - 0x59, 0xb8, 0x91, 0x9f, 0xeb, 0xe6, 0x7e, 0xa0, 0x72, 0xcf, 0x31, 0x88, 0x4f, 0x5b, 0x05, 0xf6, - 0xb6, 0x38, 0x66, 0x89, 0x9e, 0x83, 0x35, 0xe2, 0x38, 0x9a, 0xe7, 0x13, 0x9f, 0x6a, 0xfd, 0x73, - 0x9f, 0x7a, 0x1c, 0xe1, 0x56, 0x71, 0x8d, 0x38, 0xce, 0x31, 0x93, 0xb6, 0x98, 0x10, 0x3d, 0x0b, - 0x75, 0x06, 0x86, 0x26, 0x19, 0x69, 0x43, 0x6a, 0x0e, 0x86, 0x3e, 0xc7, 0xb2, 0x3c, 
0xae, 0x49, - 0x69, 0x97, 0x0b, 0x55, 0x23, 0xfc, 0x98, 0x1c, 0x08, 0x11, 0x82, 0x82, 0x41, 0x7c, 0xc2, 0x03, - 0xb9, 0x8a, 0xf9, 0x98, 0xc9, 0x1c, 0xe2, 0x0f, 0x65, 0x78, 0xf8, 0x18, 0x5d, 0x83, 0x92, 0x74, - 0x9b, 0xe7, 0x6e, 0xe5, 0x0c, 0x5d, 0x85, 0xa2, 0xe3, 0xda, 0xa7, 0x94, 0xa3, 0x76, 0x05, 0x8b, - 0x89, 0xfa, 0xeb, 0x1c, 0xac, 0xcf, 0x40, 0x26, 0xf3, 0x3b, 0x24, 0xde, 0x30, 0xf8, 0x2d, 0x36, - 0x46, 0xb7, 0x99, 0x5f, 0x62, 0x50, 0x57, 0xa6, 0x99, 0x66, 0x3c, 0x44, 0x22, 0x85, 0x76, 0xf9, - 0x73, 0x19, 0x1a, 0xa9, 0x8d, 0x0e, 0xa1, 0x31, 0x22, 0x9e, 0xaf, 0x09, 0x08, 0xd2, 0x62, 0x29, - 0x67, 0x16, 0x78, 0xf7, 0x49, 0x00, 0x5a, 0x6c, 0xc3, 0x4a, 0x47, 0xf5, 0x51, 0x42, 0x8a, 0x30, - 0x5c, 0xed, 0x9f, 0x7f, 0x46, 0x2c, 0xdf, 0xb4, 0xa8, 0x36, 0xf3, 0xe5, 0x9e, 0x98, 0x71, 0xda, - 0x39, 0x35, 0x0d, 0x6a, 0xe9, 0xc1, 0x27, 0xbb, 0x12, 0x1a, 0x87, 0x9f, 0xd4, 0x53, 0x31, 0xd4, - 0x93, 0xa0, 0x8f, 0xea, 0x90, 0xf3, 0xcf, 0x64, 0x00, 0x72, 0xfe, 0x19, 0x7a, 0x09, 0x0a, 0x6c, - 0x91, 0x7c, 0xf1, 0xf5, 0x39, 0xd9, 0x52, 0xda, 0xf5, 0xce, 0x1d, 0x8a, 0xb9, 0xa6, 0xaa, 0x86, - 0xa7, 0x21, 0x4c, 0x04, 0xd3, 0x5e, 0xd5, 0xe7, 0x61, 0x6d, 0x0a, 0xe9, 0x63, 0xdf, 0x2f, 0x1b, - 0xff, 0x7e, 0xea, 0x1a, 0xd4, 0x12, 0xb0, 0xae, 0x5e, 0x83, 0xab, 0xf3, 0x50, 0x5a, 0x1d, 0x86, - 0xf2, 0x04, 0xda, 0xa2, 0x57, 0xa1, 0x12, 0xc2, 0xb4, 0x38, 0x8d, 0xb3, 0xb1, 0x0a, 0x94, 0x71, - 0xa8, 0xca, 0x8e, 0x21, 0xdb, 0xd6, 0x7c, 0x3f, 0xe4, 0xf8, 0x8b, 0x97, 0x89, 0xe3, 0x74, 0x89, - 0x37, 0x54, 0x3f, 0x82, 0x66, 0x1a, 0x04, 0x4f, 0x2d, 0xa3, 0x10, 0x6e, 0xc3, 0x6b, 0x50, 0x3a, - 0xb1, 0xdd, 0x31, 0xf1, 0xb9, 0xb3, 0x1a, 0x96, 0x33, 0xb6, 0x3d, 0x05, 0x1c, 0xe7, 0xb9, 0x58, - 0x4c, 0x54, 0x0d, 0x9e, 0x48, 0x85, 0x61, 0x66, 0x62, 0x5a, 0x06, 0x15, 0xf1, 0xac, 0x61, 0x31, - 0x89, 0x1c, 0x89, 0x97, 0x15, 0x13, 0xf6, 0xb3, 0x1e, 0x5f, 0x2b, 0xf7, 0x5f, 0xc5, 0x72, 0xa6, - 0xde, 0x0c, 0x83, 0x95, 0x40, 0x63, 0xd4, 0x80, 0x3c, 0x43, 0xf0, 0xec, 0x8d, 0xfc, 0xcd, 0x55, - 0xcc, 0x86, 0xea, 0x3f, 
0x2a, 0x50, 0xc1, 0xd4, 0x73, 0x18, 0x7a, 0xa0, 0x16, 0x54, 0xe9, 0x99, - 0x4e, 0x1d, 0x3f, 0x00, 0xd3, 0xf9, 0x54, 0x44, 0x68, 0x77, 0x02, 0x4d, 0xc6, 0x03, 0x42, 0x33, - 0xf4, 0x8a, 0xa4, 0x7a, 0xe9, 0xac, 0x4d, 0x9a, 0xc7, 0xb9, 0xde, 0xed, 0x80, 0xeb, 0xe5, 0x53, - 0x53, 0xbf, 0xb0, 0x9a, 0x22, 0x7b, 0xaf, 0x48, 0xb2, 0x57, 0x58, 0xf0, 0x63, 0x09, 0xb6, 0xd7, - 0x4e, 0xb0, 0xbd, 0xe2, 0x82, 0x65, 0xa6, 0xd0, 0xbd, 0xdb, 0x01, 0xdd, 0x2b, 0x2d, 0x78, 0xe3, - 0x29, 0xbe, 0x77, 0x27, 0xc9, 0xf7, 0x04, 0x57, 0x7b, 0x26, 0xd5, 0x3a, 0x95, 0xf0, 0xfd, 0x30, - 0x46, 0xf8, 0x2a, 0xa9, 0x6c, 0x4b, 0x38, 0x99, 0xc3, 0xf8, 0xda, 0x09, 0xc6, 0x57, 0x5d, 0x10, - 0x83, 0x14, 0xca, 0xf7, 0x76, 0x9c, 0xf2, 0x41, 0x2a, 0x6b, 0x94, 0xdf, 0x7b, 0x1e, 0xe7, 0x7b, - 0x3d, 0xe4, 0x7c, 0x2b, 0xa9, 0xa4, 0x55, 0xae, 0x61, 0x9a, 0xf4, 0x1d, 0xce, 0x90, 0x3e, 0x41, - 0xd2, 0x9e, 0x4b, 0x75, 0xb1, 0x80, 0xf5, 0x1d, 0xce, 0xb0, 0xbe, 0xda, 0x02, 0x87, 0x0b, 0x68, - 0xdf, 0x2f, 0xe6, 0xd3, 0xbe, 0x74, 0x62, 0x26, 0x5f, 0x73, 0x39, 0xde, 0xa7, 0xa5, 0xf0, 0x3e, - 0xc1, 0xce, 0xbe, 0x9f, 0xea, 0x7e, 0x69, 0xe2, 0x77, 0x38, 0x43, 0xfc, 0x1a, 0x0b, 0xe2, 0xb1, - 0x2c, 0xf3, 0x7b, 0x9e, 0x25, 0xe7, 0x29, 0x10, 0x61, 0x00, 0x47, 0x5d, 0xd7, 0x76, 0x25, 0x89, - 0x13, 0x13, 0xf5, 0x26, 0xa3, 0x0b, 0x11, 0x60, 0x5c, 0xc0, 0x12, 0x79, 0x22, 0x89, 0x81, 0x84, - 0xfa, 0x87, 0x6c, 0x64, 0xcb, 0x33, 0x6c, 0x9c, 0x6a, 0x54, 0x25, 0xd5, 0x88, 0x91, 0xc7, 0x5c, - 0x92, 0x3c, 0x6e, 0xc2, 0x0a, 0x4b, 0x10, 0x53, 0xbc, 0x90, 0x38, 0x01, 0x2f, 0x44, 0xb7, 0x60, - 0x9d, 0x33, 0x00, 0x41, 0x31, 0x65, 0x56, 0x28, 0xf0, 0xe4, 0xb6, 0xc6, 0x1e, 0x88, 0xdd, 0x2e, - 0xd2, 0xc3, 0x8b, 0x70, 0x25, 0xa6, 0x1b, 0x26, 0x1e, 0x41, 0xa4, 0x1a, 0xa1, 0xf6, 0xae, 0xcc, - 0x40, 0x7f, 0xca, 0x46, 0x11, 0x8a, 0x38, 0xe7, 0x3c, 0x7a, 0x98, 0xfd, 0xdf, 0xd0, 0xc3, 0xdc, - 0x7f, 0x4d, 0x0f, 0xe3, 0x79, 0x34, 0x9f, 0xcc, 0xa3, 0xff, 0xcc, 0x46, 0x9f, 0x24, 0x24, 0x7b, - 0xba, 0x6d, 0x50, 0x99, 0xd9, 0xf8, 0x98, 0xa5, 0xa4, 0x91, 
0x3d, 0x90, 0xf9, 0x8b, 0x0d, 0x99, - 0x56, 0x08, 0xea, 0x55, 0x89, 0xd9, 0x61, 0x52, 0x2c, 0xf2, 0x00, 0xcb, 0xa4, 0xd8, 0x80, 0xfc, - 0x43, 0x2a, 0x20, 0x78, 0x15, 0xb3, 0x21, 0xd3, 0xe3, 0x7b, 0x8c, 0x03, 0xeb, 0x2a, 0x16, 0x13, - 0xf4, 0x1a, 0x54, 0x79, 0xaf, 0x44, 0xb3, 0x1d, 0x4f, 0xa2, 0xe5, 0x93, 0xf1, 0xb5, 0x8a, 0x96, - 0xc8, 0xd6, 0x11, 0xd3, 0x39, 0x74, 0x3c, 0x5c, 0x71, 0xe4, 0x28, 0x96, 0xef, 0xab, 0x09, 0xda, - 0x79, 0x1d, 0xaa, 0xec, 0xed, 0x3d, 0x87, 0xe8, 0x94, 0x43, 0x5f, 0x15, 0x47, 0x02, 0xf5, 0x01, - 0xa0, 0x59, 0x00, 0x47, 0x5d, 0x28, 0xd1, 0x53, 0x6a, 0xf9, 0x22, 0xff, 0xae, 0xec, 0x5c, 0x9b, - 0xc3, 0xe9, 0xa8, 0xe5, 0xb7, 0x9a, 0x2c, 0xc8, 0x7f, 0xff, 0x66, 0xb3, 0x21, 0xb4, 0x5f, 0xb0, - 0xc7, 0xa6, 0x4f, 0xc7, 0x8e, 0x7f, 0x8e, 0xa5, 0xbd, 0xfa, 0xab, 0x1c, 0x23, 0x58, 0x09, 0x70, - 0x9f, 0x1b, 0xdb, 0x60, 0xc7, 0xe7, 0x62, 0xe4, 0x7a, 0xb9, 0x78, 0x6f, 0x00, 0x0c, 0x88, 0xa7, - 0x7d, 0x4a, 0x2c, 0x9f, 0x1a, 0x32, 0xe8, 0x31, 0x09, 0x52, 0xa0, 0xc2, 0x66, 0x13, 0x8f, 0x1a, - 0x92, 0xe7, 0x87, 0xf3, 0xd8, 0x3a, 0xcb, 0xdf, 0x6d, 0x9d, 0xc9, 0x28, 0x57, 0xa6, 0xa3, 0xfc, - 0x9b, 0x5c, 0x74, 0x4a, 0x22, 0x2e, 0xfa, 0xff, 0x17, 0x87, 0xdf, 0xf2, 0x02, 0x35, 0x99, 0x65, - 0xd1, 0x31, 0xac, 0x87, 0xa7, 0x54, 0x9b, 0xf0, 0xd3, 0x1b, 0xec, 0xbb, 0x65, 0x8f, 0x79, 0xe3, - 0x34, 0x29, 0xf6, 0xd0, 0x4f, 0xe0, 0xf1, 0x29, 0x04, 0x0a, 0x5d, 0xe7, 0x96, 0x04, 0xa2, 0xc7, - 0x92, 0x40, 0x14, 0x78, 0x8e, 0x62, 0x95, 0xff, 0x8e, 0x67, 0x63, 0x8f, 0xd5, 0x3c, 0x71, 0xce, - 0x30, 0xf7, 0xeb, 0x3f, 0x03, 0x35, 0x97, 0xfa, 0xac, 0x0c, 0x4f, 0x54, 0x95, 0xab, 0x42, 0x28, - 0x6b, 0xd5, 0x23, 0x78, 0x6c, 0x2e, 0x77, 0x40, 0x3f, 0x80, 0x6a, 0x44, 0x3b, 0xb2, 0x29, 0x05, - 0x5a, 0x58, 0x74, 0x44, 0xba, 0xea, 0x1f, 0xb3, 0x91, 0xcb, 0x64, 0x19, 0xd3, 0x81, 0x92, 0x4b, - 0xbd, 0xc9, 0x48, 0x14, 0x16, 0xf5, 0x9d, 0x17, 0x97, 0x63, 0x1d, 0x4c, 0x3a, 0x19, 0xf9, 0x58, - 0x1a, 0xab, 0x0f, 0xa0, 0x24, 0x24, 0x68, 0x05, 0xca, 0xf7, 0x0e, 0xee, 0x1e, 0x1c, 0x7e, 0x70, - 
0xd0, 0xc8, 0x20, 0x80, 0xd2, 0x6e, 0xbb, 0xdd, 0x39, 0xea, 0x35, 0xb2, 0xa8, 0x0a, 0xc5, 0xdd, - 0xd6, 0x21, 0xee, 0x35, 0x72, 0x4c, 0x8c, 0x3b, 0xef, 0x75, 0xda, 0xbd, 0x46, 0x1e, 0xad, 0x43, - 0x4d, 0x8c, 0xb5, 0x3b, 0x87, 0xf8, 0xfd, 0xdd, 0x5e, 0xa3, 0x10, 0x13, 0x1d, 0x77, 0x0e, 0xde, - 0xe9, 0xe0, 0x46, 0x51, 0x7d, 0x99, 0x55, 0x2e, 0x29, 0x3c, 0x25, 0xaa, 0x51, 0xb2, 0xb1, 0x1a, - 0x45, 0xfd, 0x7d, 0x0e, 0x94, 0x74, 0xf2, 0x81, 0xde, 0x9b, 0x5a, 0xf8, 0xce, 0x25, 0x98, 0xcb, - 0xd4, 0xea, 0xd1, 0xb3, 0x50, 0x77, 0xe9, 0x09, 0xf5, 0xf5, 0xa1, 0x20, 0x43, 0x22, 0xb1, 0xd5, - 0x70, 0x4d, 0x4a, 0xb9, 0x91, 0x27, 0xd4, 0x3e, 0xa6, 0xba, 0xaf, 0x89, 0x72, 0x49, 0x6c, 0xba, - 0x2a, 0x53, 0x63, 0xd2, 0x63, 0x21, 0x54, 0x3f, 0xba, 0x54, 0x2c, 0xab, 0x50, 0xc4, 0x9d, 0x1e, - 0xfe, 0x69, 0x23, 0x8f, 0x10, 0xd4, 0xf9, 0x50, 0x3b, 0x3e, 0xd8, 0x3d, 0x3a, 0xee, 0x1e, 0xb2, - 0x58, 0x5e, 0x81, 0xb5, 0x20, 0x96, 0x81, 0xb0, 0xa8, 0x92, 0x68, 0x37, 0x2c, 0xa8, 0xd3, 0xd0, - 0x6d, 0xa8, 0x48, 0xa6, 0x13, 0x9c, 0x35, 0x65, 0xb6, 0x53, 0xf1, 0xbe, 0xd4, 0xc0, 0xa1, 0xae, - 0xfa, 0xef, 0x2c, 0xac, 0x4d, 0x9d, 0x41, 0xb4, 0x03, 0x45, 0xc1, 0xd9, 0xd3, 0xda, 0xf1, 0x1c, - 0x42, 0xe4, 0x81, 0x15, 0xaa, 0xe8, 0x4d, 0xa8, 0x50, 0xd9, 0x71, 0x98, 0x77, 0xd6, 0xc5, 0xef, - 0x07, 0x3d, 0x09, 0x69, 0x1a, 0x5a, 0xa0, 0xb7, 0xa0, 0x1a, 0x82, 0x89, 0xac, 0xf1, 0x9e, 0x9e, - 0x35, 0x0f, 0x61, 0x48, 0xda, 0x47, 0x36, 0xe8, 0xf5, 0x88, 0xa7, 0x15, 0x66, 0x2b, 0x05, 0x69, - 0x2e, 0x14, 0xa4, 0x71, 0xa0, 0xaf, 0xb6, 0x61, 0x25, 0xb6, 0x1e, 0xf4, 0x24, 0x54, 0xc7, 0xe4, - 0x4c, 0x76, 0xb2, 0x44, 0x2f, 0xa2, 0x32, 0x26, 0x67, 0xa2, 0x89, 0xf5, 0x38, 0x94, 0xd9, 0xc3, - 0x01, 0x11, 0x41, 0xce, 0xe3, 0xd2, 0x98, 0x9c, 0xbd, 0x4b, 0x3c, 0xf5, 0x43, 0xa8, 0x27, 0xbb, - 0x38, 0x6c, 0xb3, 0xbb, 0xf6, 0xc4, 0x32, 0xb8, 0x8f, 0x22, 0x16, 0x13, 0xf4, 0x2a, 0x14, 0x4f, - 0x6d, 0x81, 0x87, 0xf3, 0x51, 0xe1, 0xbe, 0xed, 0xd3, 0x58, 0x17, 0x48, 0x68, 0xab, 0x9f, 0x41, - 0x91, 0xe3, 0x1b, 0xc3, 0x2a, 0xde, 
0x8f, 0x91, 0x1c, 0x95, 0x8d, 0xd1, 0x87, 0x00, 0xc4, 0xf7, - 0x5d, 0xb3, 0x3f, 0x89, 0x1c, 0x6f, 0xce, 0xc7, 0xc7, 0xdd, 0x40, 0xaf, 0x75, 0x5d, 0x02, 0xe5, - 0xd5, 0xc8, 0x34, 0x06, 0x96, 0x31, 0x87, 0xea, 0x01, 0xd4, 0x93, 0xb6, 0x01, 0xad, 0xca, 0xce, - 0xa1, 0x55, 0xb9, 0x38, 0xad, 0x0a, 0x49, 0x59, 0x5e, 0xf4, 0xde, 0xf8, 0x44, 0xfd, 0x3c, 0x0b, - 0x95, 0xde, 0x99, 0x3c, 0x39, 0x29, 0x6d, 0x9f, 0xc8, 0x34, 0x17, 0x6f, 0x72, 0x88, 0x3e, 0x52, - 0x3e, 0xec, 0x4e, 0xbd, 0x1d, 0x62, 0x43, 0x61, 0xd9, 0x0a, 0x35, 0x68, 0xd3, 0x49, 0x3c, 0x7c, - 0x03, 0xaa, 0xe1, 0xae, 0x62, 0x64, 0x9f, 0x18, 0x86, 0x4b, 0x3d, 0x4f, 0xae, 0x2d, 0x98, 0xf2, - 0x2e, 0xa2, 0xfd, 0xa9, 0x6c, 0xa3, 0xe4, 0xb1, 0x98, 0xa8, 0x06, 0xac, 0x4d, 0x65, 0x46, 0xf4, - 0x06, 0x94, 0x9d, 0x49, 0x5f, 0x0b, 0xc2, 0x33, 0x75, 0x78, 0x02, 0x1e, 0x39, 0xe9, 0x8f, 0x4c, - 0xfd, 0x2e, 0x3d, 0x0f, 0x5e, 0xc6, 0x99, 0xf4, 0xef, 0x8a, 0x28, 0x8a, 0x5f, 0xc9, 0xc5, 0x7f, - 0xe5, 0x14, 0x2a, 0xc1, 0xa6, 0x40, 0x3f, 0x8a, 0x9f, 0x93, 0xec, 0xec, 0x31, 0x4f, 0x66, 0x6b, - 0xe9, 0x3e, 0x76, 0x4c, 0x6e, 0xc1, 0xba, 0x67, 0x0e, 0x2c, 0x6a, 0x68, 0x51, 0xb9, 0xc1, 0x7f, - 0xad, 0x82, 0xd7, 0xc4, 0x83, 0xfd, 0xa0, 0xd6, 0x50, 0xff, 0x95, 0x85, 0x4a, 0x70, 0x60, 0xd1, - 0xcb, 0xb1, 0x7d, 0x57, 0x9f, 0xd3, 0x48, 0x09, 0x14, 0xa3, 0x46, 0x60, 0xf2, 0x5d, 0x73, 0x97, - 0x7f, 0xd7, 0xb4, 0x8e, 0x6e, 0xd0, 0x5a, 0x2f, 0x5c, 0xba, 0xb5, 0xfe, 0x02, 0x20, 0xdf, 0xf6, - 0xc9, 0x48, 0x3b, 0xb5, 0x7d, 0xd3, 0x1a, 0x68, 0x22, 0xd8, 0x82, 0xb4, 0x35, 0xf8, 0x93, 0xfb, - 0xfc, 0xc1, 0x11, 0x8f, 0xfb, 0x2f, 0xb3, 0x50, 0x09, 0xd3, 0xef, 0x65, 0xfb, 0x7a, 0xd7, 0xa0, - 0x24, 0x33, 0x8c, 0x68, 0xec, 0xc9, 0x59, 0xd8, 0x62, 0x2e, 0xc4, 0x5a, 0xcc, 0x0a, 0x83, 0x6e, - 0x9f, 0x70, 0x0e, 0x22, 0x2a, 0xbe, 0x70, 0x7e, 0xeb, 0x75, 0x58, 0x89, 0xb5, 0x58, 0xd9, 0xc9, - 0x3b, 0xe8, 0x7c, 0xd0, 0xc8, 0x28, 0xe5, 0xcf, 0xbf, 0xbc, 0x91, 0x3f, 0xa0, 0x9f, 0xb2, 0x3d, - 0x8b, 0x3b, 0xed, 0x6e, 0xa7, 0x7d, 0xb7, 0x91, 0x55, 0x56, 0x3e, 0xff, 
0xf2, 0x46, 0x19, 0x53, - 0xde, 0xc4, 0xb9, 0xd5, 0x85, 0xd5, 0xf8, 0x57, 0x49, 0x26, 0x29, 0x04, 0xf5, 0x77, 0xee, 0x1d, - 0xed, 0xef, 0xb5, 0x77, 0x7b, 0x1d, 0xed, 0xfe, 0x61, 0xaf, 0xd3, 0xc8, 0xa2, 0xc7, 0xe1, 0xca, - 0xfe, 0xde, 0xbb, 0xdd, 0x9e, 0xd6, 0xde, 0xdf, 0xeb, 0x1c, 0xf4, 0xb4, 0xdd, 0x5e, 0x6f, 0xb7, - 0x7d, 0xb7, 0x91, 0xdb, 0xf9, 0x1d, 0xc0, 0xda, 0x6e, 0xab, 0xbd, 0xc7, 0x12, 0xac, 0xa9, 0x13, - 0x5e, 0x8e, 0xb7, 0xa1, 0xc0, 0x0b, 0xee, 0x0b, 0xef, 0x6a, 0x95, 0x8b, 0xdb, 0x7b, 0xe8, 0x0e, - 0x14, 0x79, 0x2d, 0x8e, 0x2e, 0xbe, 0xbc, 0x55, 0x16, 0xf4, 0xfb, 0xd8, 0xcb, 0xf0, 0xe3, 0x71, - 0xe1, 0x6d, 0xae, 0x72, 0x71, 0xfb, 0x0f, 0x61, 0xa8, 0x46, 0x55, 0xc2, 0xe2, 0xdb, 0x4d, 0x65, - 0x09, 0xb0, 0x41, 0xfb, 0x50, 0x0e, 0xea, 0xaf, 0x45, 0xf7, 0xad, 0xca, 0xc2, 0xfe, 0x1c, 0x0b, - 0x97, 0xa8, 0x93, 0x2f, 0xbe, 0x3c, 0x56, 0x16, 0x34, 0x1b, 0xd1, 0x1e, 0x94, 0x24, 0xf5, 0x5d, - 0x70, 0x87, 0xaa, 0x2c, 0xea, 0xb7, 0xb1, 0xa0, 0x45, 0x0d, 0x88, 0xc5, 0x57, 0xe2, 0xca, 0x12, - 0x7d, 0x54, 0x74, 0x0f, 0x20, 0x56, 0x15, 0x2f, 0x71, 0xd7, 0xad, 0x2c, 0xd3, 0x1f, 0x45, 0x87, - 0x50, 0x09, 0xab, 0x9f, 0x85, 0x37, 0xcf, 0xca, 0xe2, 0x46, 0x25, 0x7a, 0x00, 0xb5, 0x24, 0xed, - 0x5f, 0xee, 0x3e, 0x59, 0x59, 0xb2, 0x03, 0xc9, 0xfc, 0x27, 0x6b, 0x80, 0xe5, 0xee, 0x97, 0x95, - 0x25, 0x1b, 0x92, 0xe8, 0x63, 0x58, 0x9f, 0xe5, 0xe8, 0xcb, 0x5f, 0x37, 0x2b, 0x97, 0x68, 0x51, - 0xa2, 0x31, 0xa0, 0x39, 0xdc, 0xfe, 0x12, 0xb7, 0xcf, 0xca, 0x65, 0x3a, 0x96, 0x2c, 0x74, 0x49, - 0xc2, 0xbc, 0xdc, 0x6d, 0xb4, 0xb2, 0x64, 0xef, 0xb2, 0xf5, 0xde, 0x57, 0xdf, 0x6e, 0x64, 0xbf, - 0xfe, 0x76, 0x23, 0xfb, 0xb7, 0x6f, 0x37, 0xb2, 0x5f, 0x3c, 0xda, 0xc8, 0x7c, 0xfd, 0x68, 0x23, - 0xf3, 0x97, 0x47, 0x1b, 0x99, 0x9f, 0xbd, 0x34, 0x30, 0xfd, 0xe1, 0xa4, 0xbf, 0xa5, 0xdb, 0xe3, - 0xed, 0x11, 0xf9, 0xec, 0x7c, 0x44, 0x8d, 0x01, 0x75, 0x63, 0xc3, 0x17, 0x75, 0xdb, 0xa5, 0xb1, - 0xff, 0xf3, 0xf4, 0x4b, 0x3c, 0x73, 0xbd, 0xf2, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x86, 0xa8, - 0x11, 0x6f, 
0xef, 0x23, 0x00, 0x00, + 0x47, 0x4d, 0x9d, 0x3a, 0x63, 0xc3, 0xf4, 0x76, 0x48, 0xaf, 0x6f, 0xec, 0x78, 0x17, 0x36, 0x75, + 0xb7, 0x6d, 0xc7, 0xf2, 0x2c, 0xb4, 0x1e, 0x3e, 0xdc, 0x66, 0x0f, 0x1b, 0x4f, 0x45, 0xb4, 0xfb, + 0xce, 0x85, 0xed, 0x59, 0x3b, 0xb6, 0x63, 0x59, 0xa7, 0x42, 0xbf, 0x71, 0x3d, 0xf2, 0x98, 0xfb, + 0x89, 0x7a, 0x8b, 0x3d, 0x95, 0xc6, 0x0f, 0xe9, 0x85, 0xff, 0xf4, 0xa9, 0x19, 0x5b, 0x9b, 0x38, + 0x64, 0xec, 0x3f, 0xde, 0x1a, 0x58, 0xd6, 0x60, 0x44, 0x77, 0xf8, 0xac, 0x37, 0x39, 0xdd, 0xf1, + 0x8c, 0x31, 0x75, 0x3d, 0x32, 0xb6, 0xa5, 0xc2, 0xd5, 0x81, 0x35, 0xb0, 0xf8, 0x70, 0x87, 0x8d, + 0x84, 0x54, 0xfd, 0xb2, 0x08, 0x05, 0x4c, 0x3f, 0x99, 0x50, 0xd7, 0x43, 0xbb, 0x90, 0xa5, 0xfd, + 0xa1, 0x55, 0x57, 0x6e, 0x28, 0x37, 0xcb, 0xbb, 0xd7, 0xb7, 0xa7, 0x16, 0xb7, 0x2d, 0xf5, 0xda, + 0xfd, 0xa1, 0xd5, 0x49, 0x61, 0xae, 0x8b, 0x5e, 0x85, 0xdc, 0xe9, 0x68, 0xe2, 0x0e, 0xeb, 0x69, + 0x6e, 0xf4, 0x54, 0x92, 0xd1, 0x1d, 0xa6, 0xd4, 0x49, 0x61, 0xa1, 0xcd, 0x7e, 0xca, 0x30, 0x4f, + 0xad, 0x7a, 0x66, 0xf1, 0x4f, 0xed, 0x9b, 0xa7, 0xfc, 0xa7, 0x98, 0x2e, 0x6a, 0x02, 0x18, 0xa6, + 0xe1, 0x69, 0xfd, 0x21, 0x31, 0xcc, 0x7a, 0x96, 0x5b, 0x3e, 0x9d, 0x6c, 0x69, 0x78, 0x2d, 0xa6, + 0xd8, 0x49, 0xe1, 0x92, 0xe1, 0x4f, 0xd8, 0xeb, 0x7e, 0x32, 0xa1, 0xce, 0x45, 0x3d, 0xb7, 0xf8, + 0x75, 0x7f, 0xcc, 0x94, 0xd8, 0xeb, 0x72, 0x6d, 0xd4, 0x86, 0x72, 0x8f, 0x0e, 0x0c, 0x53, 0xeb, + 0x8d, 0xac, 0xfe, 0xc3, 0x7a, 0x9e, 0x1b, 0xab, 0x49, 0xc6, 0x4d, 0xa6, 0xda, 0x64, 0x9a, 0x9d, + 0x14, 0x86, 0x5e, 0x30, 0x43, 0x6f, 0x42, 0xb1, 0x3f, 0xa4, 0xfd, 0x87, 0x9a, 0x77, 0x5e, 0x2f, + 0x70, 0x1f, 0x5b, 0x49, 0x3e, 0x5a, 0x4c, 0xaf, 0x7b, 0xde, 0x49, 0xe1, 0x42, 0x5f, 0x0c, 0xd9, + 0xfa, 0x75, 0x3a, 0x32, 0xce, 0xa8, 0xc3, 0xec, 0x8b, 0x8b, 0xd7, 0xff, 0x8e, 0xd0, 0xe4, 0x1e, + 0x4a, 0xba, 0x3f, 0x41, 0x6f, 0x41, 0x89, 0x9a, 0xba, 0x5c, 0x46, 0x89, 0xbb, 0xb8, 0x91, 0xf8, + 0x9d, 0x4d, 0xdd, 0x5f, 0x44, 0x91, 0xca, 0x31, 0x7a, 0x0d, 0xf2, 0x7d, 0x6b, 0x3c, 0x36, 0xbc, + 0x3a, 0x70, 
0xeb, 0xcd, 0xc4, 0x05, 0x70, 0xad, 0x4e, 0x0a, 0x4b, 0x7d, 0x74, 0x08, 0xd5, 0x91, + 0xe1, 0x7a, 0x9a, 0x6b, 0x12, 0xdb, 0x1d, 0x5a, 0x9e, 0x5b, 0x2f, 0x73, 0x0f, 0xcf, 0x26, 0x79, + 0x38, 0x30, 0x5c, 0xef, 0xc4, 0x57, 0xee, 0xa4, 0x70, 0x65, 0x14, 0x15, 0x30, 0x7f, 0xd6, 0xe9, + 0x29, 0x75, 0x02, 0x87, 0xf5, 0xb5, 0xc5, 0xfe, 0x8e, 0x98, 0xb6, 0x6f, 0xcf, 0xfc, 0x59, 0x51, + 0x01, 0xfa, 0x39, 0x5c, 0x19, 0x59, 0x44, 0x0f, 0xdc, 0x69, 0xfd, 0xe1, 0xc4, 0x7c, 0x58, 0xaf, + 0x70, 0xa7, 0xcf, 0x27, 0xbe, 0xa4, 0x45, 0x74, 0xdf, 0x45, 0x8b, 0x19, 0x74, 0x52, 0x78, 0x63, + 0x34, 0x2d, 0x44, 0x0f, 0xe0, 0x2a, 0xb1, 0xed, 0xd1, 0xc5, 0xb4, 0xf7, 0x2a, 0xf7, 0x7e, 0x2b, + 0xc9, 0xfb, 0x1e, 0xb3, 0x99, 0x76, 0x8f, 0xc8, 0x8c, 0x94, 0x05, 0xc3, 0x76, 0xa8, 0xed, 0x58, + 0x7d, 0xea, 0xba, 0x9a, 0x77, 0xee, 0xd6, 0xd7, 0x17, 0x07, 0xe3, 0x38, 0xd0, 0xee, 0x9e, 0xf3, + 0xe0, 0xda, 0x51, 0x41, 0xb3, 0x00, 0xb9, 0x33, 0x32, 0x9a, 0x50, 0xf5, 0x7b, 0x50, 0x8e, 0x1c, + 0x7b, 0x54, 0x87, 0xc2, 0x98, 0xba, 0x2e, 0x19, 0x50, 0x8e, 0x12, 0x25, 0xec, 0x4f, 0xd5, 0x2a, + 0xac, 0x45, 0x8f, 0xba, 0xfa, 0x85, 0x12, 0x58, 0xb2, 0x53, 0xcc, 0x2c, 0xcf, 0xa8, 0xe3, 0x1a, + 0x96, 0xe9, 0x5b, 0xca, 0x29, 0x7a, 0x06, 0x2a, 0x7c, 0x3f, 0x6a, 0xfe, 0x73, 0x06, 0x25, 0x59, + 0xbc, 0xc6, 0x85, 0xf7, 0xa5, 0xd2, 0x16, 0x94, 0xed, 0x5d, 0x3b, 0x50, 0xc9, 0x70, 0x15, 0xb0, + 0x77, 0x6d, 0x5f, 0xe1, 0x69, 0x58, 0x63, 0xeb, 0x0b, 0x34, 0xb2, 0xfc, 0x47, 0xca, 0x4c, 0x26, + 0x55, 0xd4, 0x3f, 0xa7, 0xa1, 0x36, 0x0d, 0x0f, 0xe8, 0x35, 0xc8, 0x32, 0xa4, 0x94, 0xa0, 0xd7, + 0xd8, 0x16, 0x30, 0xba, 0xed, 0xc3, 0xe8, 0x76, 0xd7, 0x87, 0xd1, 0x66, 0xf1, 0xab, 0x6f, 0xb6, + 0x52, 0x5f, 0xfc, 0x75, 0x4b, 0xc1, 0xdc, 0x02, 0x3d, 0xc1, 0x4e, 0x33, 0x31, 0x4c, 0xcd, 0xd0, + 0xf9, 0x2b, 0x97, 0xd8, 0x51, 0x25, 0x86, 0xb9, 0xaf, 0xa3, 0xbb, 0x50, 0xeb, 0x5b, 0xa6, 0x4b, + 0x4d, 0x77, 0xe2, 0x6a, 0x02, 0xa6, 0x25, 0xd4, 0xcd, 0x9e, 0xb6, 0x96, 0xaf, 0x78, 0xcc, 0xf5, + 0xf0, 0x7a, 0x3f, 0x2e, 0x40, 0x77, 0x00, 0xce, 
0xc8, 0xc8, 0xd0, 0x89, 0x67, 0x39, 0x6e, 0x3d, + 0x7b, 0x23, 0x33, 0xd7, 0xcd, 0x7d, 0x5f, 0xe5, 0x9e, 0xad, 0x13, 0x8f, 0x36, 0xb3, 0xec, 0x6d, + 0x71, 0xc4, 0x12, 0x3d, 0x07, 0xeb, 0xc4, 0xb6, 0x35, 0xd7, 0x23, 0x1e, 0xd5, 0x7a, 0x17, 0x1e, + 0x75, 0x39, 0x0a, 0xae, 0xe1, 0x0a, 0xb1, 0xed, 0x13, 0x26, 0x6d, 0x32, 0x21, 0x7a, 0x16, 0xaa, + 0x0c, 0x30, 0x0d, 0x32, 0xd2, 0x86, 0xd4, 0x18, 0x0c, 0x3d, 0x8e, 0x77, 0x19, 0x5c, 0x91, 0xd2, + 0x0e, 0x17, 0xaa, 0x7a, 0xf0, 0xc1, 0x39, 0x58, 0x22, 0x04, 0x59, 0x9d, 0x78, 0x84, 0x07, 0x72, + 0x0d, 0xf3, 0x31, 0x93, 0xd9, 0xc4, 0x1b, 0xca, 0xf0, 0xf0, 0x31, 0xba, 0x06, 0x79, 0xe9, 0x36, + 0xc3, 0xdd, 0xca, 0x19, 0xba, 0x0a, 0x39, 0xdb, 0xb1, 0xce, 0x28, 0xff, 0x72, 0x45, 0x2c, 0x26, + 0xea, 0xaf, 0xd3, 0xb0, 0x31, 0x03, 0xab, 0xcc, 0xef, 0x90, 0xb8, 0x43, 0xff, 0xb7, 0xd8, 0x18, + 0xdd, 0x66, 0x7e, 0x89, 0x4e, 0x1d, 0x99, 0x8a, 0xea, 0xd1, 0x10, 0x89, 0x34, 0xdb, 0xe1, 0xcf, + 0x65, 0x68, 0xa4, 0x36, 0x3a, 0x82, 0xda, 0x88, 0xb8, 0x9e, 0x26, 0x60, 0x4a, 0x8b, 0xa4, 0xa5, + 0x59, 0x70, 0x3e, 0x20, 0x3e, 0xb0, 0xb1, 0x3d, 0x2d, 0x1d, 0x55, 0x47, 0x31, 0x29, 0xc2, 0x70, + 0xb5, 0x77, 0xf1, 0x19, 0x31, 0x3d, 0xc3, 0xa4, 0xda, 0xcc, 0x97, 0x7b, 0x62, 0xc6, 0x69, 0xfb, + 0xcc, 0xd0, 0xa9, 0xd9, 0xf7, 0x3f, 0xd9, 0x95, 0xc0, 0x38, 0xf8, 0xa4, 0xae, 0x8a, 0xa1, 0x1a, + 0x4f, 0x0c, 0xa8, 0x0a, 0x69, 0xef, 0x5c, 0x06, 0x20, 0xed, 0x9d, 0xa3, 0x97, 0x20, 0xcb, 0x16, + 0xc9, 0x17, 0x5f, 0x9d, 0x93, 0x51, 0xa5, 0x5d, 0xf7, 0xc2, 0xa6, 0x98, 0x6b, 0xaa, 0x6a, 0x70, + 0x1a, 0x82, 0x64, 0x31, 0xed, 0x55, 0x7d, 0x1e, 0xd6, 0xa7, 0xb2, 0x41, 0xe4, 0xfb, 0x29, 0xd1, + 0xef, 0xa7, 0xae, 0x43, 0x25, 0x06, 0xfd, 0xea, 0x35, 0xb8, 0x3a, 0x0f, 0xc9, 0xd5, 0x61, 0x20, + 0x8f, 0x21, 0x32, 0x7a, 0x15, 0x8a, 0x01, 0x94, 0x8b, 0xd3, 0x38, 0x1b, 0x2b, 0x5f, 0x19, 0x07, + 0xaa, 0xec, 0x18, 0xb2, 0x6d, 0xcd, 0xf7, 0x43, 0x9a, 0xbf, 0x78, 0x81, 0xd8, 0x76, 0x87, 0xb8, + 0x43, 0xf5, 0x23, 0xa8, 0x27, 0xc1, 0xf4, 0xd4, 0x32, 0xb2, 0xc1, 0x36, 0xbc, 0x06, 
0xf9, 0x53, + 0xcb, 0x19, 0x13, 0x8f, 0x3b, 0xab, 0x60, 0x39, 0x63, 0xdb, 0x53, 0x40, 0x76, 0x86, 0x8b, 0xc5, + 0x44, 0xd5, 0xe0, 0x89, 0x44, 0xa8, 0x66, 0x26, 0x86, 0xa9, 0x53, 0x11, 0xcf, 0x0a, 0x16, 0x93, + 0xd0, 0x91, 0x78, 0x59, 0x31, 0x61, 0x3f, 0xeb, 0xf2, 0xb5, 0x72, 0xff, 0x25, 0x2c, 0x67, 0xea, + 0xcd, 0x20, 0x58, 0x31, 0xc4, 0x46, 0x35, 0xc8, 0x30, 0x94, 0x57, 0x6e, 0x64, 0x6e, 0xae, 0x61, + 0x36, 0x54, 0xff, 0x51, 0x84, 0x22, 0xa6, 0xae, 0xcd, 0xd0, 0x03, 0x35, 0xa1, 0x44, 0xcf, 0xfb, + 0xd4, 0xf6, 0x7c, 0xbc, 0x9d, 0x4f, 0x57, 0x84, 0x76, 0xdb, 0xd7, 0x64, 0x5c, 0x21, 0x30, 0x43, + 0xaf, 0x48, 0x3a, 0x98, 0xcc, 0xec, 0xa4, 0x79, 0x94, 0x0f, 0xde, 0xf6, 0xf9, 0x60, 0x26, 0x91, + 0x1e, 0x08, 0xab, 0x29, 0x42, 0xf8, 0x8a, 0x24, 0x84, 0xd9, 0x25, 0x3f, 0x16, 0x63, 0x84, 0xad, + 0x18, 0x23, 0xcc, 0x2d, 0x59, 0x66, 0x02, 0x25, 0xbc, 0xed, 0x53, 0xc2, 0xfc, 0x92, 0x37, 0x9e, + 0xe2, 0x84, 0x77, 0xe2, 0x9c, 0x50, 0xf0, 0xb9, 0x67, 0x12, 0xad, 0x13, 0x49, 0xe1, 0x0f, 0x23, + 0xa4, 0xb0, 0x98, 0xc8, 0xc8, 0x84, 0x93, 0x39, 0xac, 0xb0, 0x15, 0x63, 0x85, 0xa5, 0x25, 0x31, + 0x48, 0xa0, 0x85, 0x6f, 0x47, 0x69, 0x21, 0x24, 0x32, 0x4b, 0xf9, 0xbd, 0xe7, 0xf1, 0xc2, 0xd7, + 0x03, 0x5e, 0x58, 0x4e, 0x24, 0xb6, 0x72, 0x0d, 0xd3, 0xc4, 0xf0, 0x68, 0x86, 0x18, 0x0a, 0x22, + 0xf7, 0x5c, 0xa2, 0x8b, 0x25, 0xcc, 0xf0, 0x68, 0x86, 0x19, 0x56, 0x96, 0x38, 0x5c, 0x42, 0x0d, + 0x7f, 0x31, 0x9f, 0x1a, 0x26, 0x93, 0x37, 0xf9, 0x9a, 0xab, 0x71, 0x43, 0x2d, 0x81, 0x1b, 0x0a, + 0x06, 0xf7, 0xfd, 0x44, 0xf7, 0x2b, 0x93, 0xc3, 0xa3, 0x19, 0x72, 0x58, 0x5b, 0x12, 0x8f, 0x55, + 0xd9, 0xe1, 0xf3, 0x2c, 0x39, 0x4f, 0x81, 0x08, 0x03, 0x38, 0xea, 0x38, 0x96, 0x23, 0x79, 0x9e, + 0x98, 0xa8, 0x37, 0x19, 0x5d, 0x08, 0x01, 0x63, 0x01, 0x93, 0xe4, 0x89, 0x24, 0x02, 0x12, 0xea, + 0x1f, 0x94, 0xd0, 0x96, 0x67, 0xd8, 0x28, 0xd5, 0x28, 0x49, 0xaa, 0x11, 0xe1, 0x97, 0xe9, 0x38, + 0xbf, 0xdc, 0x82, 0x32, 0x4b, 0x10, 0x53, 0xd4, 0x91, 0xd8, 0x01, 0x75, 0xbc, 0x05, 0x1b, 0x9c, + 0x01, 0x08, 0x16, 0x2a, 
0xb3, 0x42, 0x96, 0x27, 0xb7, 0x75, 0xf6, 0x40, 0xec, 0x76, 0x91, 0x1e, + 0x5e, 0x84, 0x2b, 0x11, 0xdd, 0x20, 0xf1, 0x08, 0x22, 0x55, 0x0b, 0xb4, 0xf7, 0x64, 0x06, 0xfa, + 0x93, 0x12, 0x46, 0x28, 0xe4, 0x9c, 0xf3, 0xe8, 0xa1, 0xf2, 0xbf, 0xa1, 0x87, 0xe9, 0xff, 0x9a, + 0x1e, 0x46, 0xf3, 0x68, 0x26, 0x9e, 0x47, 0xff, 0xa9, 0x84, 0x9f, 0x24, 0x20, 0x7b, 0x7d, 0x4b, + 0xa7, 0x32, 0xb3, 0xf1, 0x31, 0x4b, 0x49, 0x23, 0x6b, 0x20, 0xf3, 0x17, 0x1b, 0x32, 0xad, 0x00, + 0xd4, 0x4b, 0x12, 0xb3, 0x83, 0xa4, 0x98, 0xe3, 0x01, 0x96, 0x49, 0xb1, 0x06, 0x99, 0x87, 0x54, + 0x40, 0xf0, 0x1a, 0x66, 0x43, 0xa6, 0xc7, 0xf7, 0x18, 0x07, 0xd6, 0x35, 0x2c, 0x26, 0xe8, 0x35, + 0x28, 0xf1, 0x7e, 0x8a, 0x66, 0xd9, 0xae, 0x44, 0xcb, 0x27, 0xa3, 0x6b, 0x15, 0x6d, 0x93, 0xed, + 0x63, 0xa6, 0x73, 0x64, 0xbb, 0xb8, 0x68, 0xcb, 0x51, 0x24, 0xdf, 0x97, 0x62, 0xb4, 0xf3, 0x3a, + 0x94, 0xd8, 0xdb, 0xbb, 0x36, 0xe9, 0x53, 0x0e, 0x7d, 0x25, 0x1c, 0x0a, 0xd4, 0x07, 0x80, 0x66, + 0x01, 0x1c, 0x75, 0x20, 0x4f, 0xcf, 0xa8, 0xe9, 0x89, 0xfc, 0x5b, 0xde, 0xbd, 0x36, 0x87, 0xd3, + 0x51, 0xd3, 0x6b, 0xd6, 0x59, 0x90, 0xff, 0xfe, 0xcd, 0x56, 0x4d, 0x68, 0xbf, 0x60, 0x8d, 0x0d, + 0x8f, 0x8e, 0x6d, 0xef, 0x02, 0x4b, 0x7b, 0xf5, 0x57, 0x69, 0x46, 0xb0, 0x62, 0xe0, 0x3e, 0x37, + 0xb6, 0xfe, 0x8e, 0x4f, 0x47, 0xc8, 0xf5, 0x6a, 0xf1, 0xde, 0x04, 0x18, 0x10, 0x57, 0xfb, 0x94, + 0x98, 0x1e, 0xd5, 0x65, 0xd0, 0x23, 0x12, 0xd4, 0x80, 0x22, 0x9b, 0x4d, 0x5c, 0xaa, 0x4b, 0x9e, + 0x1f, 0xcc, 0x23, 0xeb, 0x2c, 0x7c, 0xb7, 0x75, 0xc6, 0xa3, 0x5c, 0x9c, 0x8e, 0xf2, 0x6f, 0xd2, + 0xe1, 0x29, 0x09, 0xb9, 0xe8, 0xff, 0x5f, 0x1c, 0x7e, 0xcb, 0x0b, 0xd4, 0x78, 0x96, 0x45, 0x27, + 0xb0, 0x11, 0x9c, 0x52, 0x6d, 0xc2, 0x4f, 0xaf, 0xbf, 0xef, 0x56, 0x3d, 0xe6, 0xb5, 0xb3, 0xb8, + 0xd8, 0x45, 0x3f, 0x81, 0xc7, 0xa7, 0x10, 0x28, 0x70, 0x9d, 0x5e, 0x11, 0x88, 0x1e, 0x8b, 0x03, + 0x91, 0xef, 0x39, 0x8c, 0x55, 0xe6, 0x3b, 0x9e, 0x8d, 0x7d, 0x56, 0xf3, 0x44, 0x39, 0xc3, 0xdc, + 0xaf, 0xff, 0x0c, 0x54, 0x1c, 0xea, 0xb1, 0x32, 0x3c, 0x56, 
0x55, 0xae, 0x09, 0xa1, 0xac, 0x55, + 0x8f, 0xe1, 0xb1, 0xb9, 0xdc, 0x01, 0xfd, 0x00, 0x4a, 0x21, 0xed, 0x50, 0x12, 0x0a, 0xb4, 0xa0, + 0xe8, 0x08, 0x75, 0xd5, 0x3f, 0x2a, 0xa1, 0xcb, 0x78, 0x19, 0xd3, 0x86, 0xbc, 0x43, 0xdd, 0xc9, + 0x48, 0x14, 0x16, 0xd5, 0xdd, 0x17, 0x57, 0x63, 0x1d, 0x4c, 0x3a, 0x19, 0x79, 0x58, 0x1a, 0xab, + 0x0f, 0x20, 0x2f, 0x24, 0xa8, 0x0c, 0x85, 0x7b, 0x87, 0x77, 0x0f, 0x8f, 0x3e, 0x38, 0xac, 0xa5, + 0x10, 0x40, 0x7e, 0xaf, 0xd5, 0x6a, 0x1f, 0x77, 0x6b, 0x0a, 0x2a, 0x41, 0x6e, 0xaf, 0x79, 0x84, + 0xbb, 0xb5, 0x34, 0x13, 0xe3, 0xf6, 0x7b, 0xed, 0x56, 0xb7, 0x96, 0x41, 0x1b, 0x50, 0x11, 0x63, + 0xed, 0xce, 0x11, 0x7e, 0x7f, 0xaf, 0x5b, 0xcb, 0x46, 0x44, 0x27, 0xed, 0xc3, 0x77, 0xda, 0xb8, + 0x96, 0x53, 0x5f, 0x66, 0x95, 0x4b, 0x02, 0x4f, 0x09, 0x6b, 0x14, 0x25, 0x52, 0xa3, 0xa8, 0xbf, + 0x4f, 0x43, 0x23, 0x99, 0x7c, 0xa0, 0xf7, 0xa6, 0x16, 0xbe, 0x7b, 0x09, 0xe6, 0x32, 0xb5, 0x7a, + 0xf4, 0x2c, 0x54, 0x1d, 0x7a, 0x4a, 0xbd, 0xfe, 0x50, 0x90, 0x21, 0x91, 0xd8, 0x2a, 0xb8, 0x22, + 0xa5, 0xdc, 0xc8, 0x15, 0x6a, 0x1f, 0xd3, 0xbe, 0xa7, 0x89, 0x72, 0x49, 0x6c, 0xba, 0x12, 0x53, + 0x63, 0xd2, 0x13, 0x21, 0x54, 0x3f, 0xba, 0x54, 0x2c, 0x4b, 0x90, 0xc3, 0xed, 0x2e, 0xfe, 0x69, + 0x2d, 0x83, 0x10, 0x54, 0xf9, 0x50, 0x3b, 0x39, 0xdc, 0x3b, 0x3e, 0xe9, 0x1c, 0xb1, 0x58, 0x5e, + 0x81, 0x75, 0x3f, 0x96, 0xbe, 0x30, 0xa7, 0x92, 0x70, 0x37, 0x2c, 0xa9, 0xd3, 0xd0, 0x6d, 0x28, + 0x4a, 0xa6, 0xe3, 0x9f, 0xb5, 0xc6, 0x6c, 0xa7, 0xe2, 0x7d, 0xa9, 0x81, 0x03, 0x5d, 0xf5, 0xdf, + 0x0a, 0xac, 0x4f, 0x9d, 0x41, 0xb4, 0x0b, 0x39, 0xc1, 0xd9, 0x93, 0x5a, 0xf6, 0x1c, 0x42, 0xe4, + 0x81, 0x15, 0xaa, 0xe8, 0x4d, 0x28, 0x52, 0xd9, 0x71, 0x98, 0x77, 0xd6, 0xc5, 0xef, 0xfb, 0x3d, + 0x09, 0x69, 0x1a, 0x58, 0xa0, 0xb7, 0xa0, 0x14, 0x80, 0x89, 0xac, 0xf1, 0x9e, 0x9e, 0x35, 0x0f, + 0x60, 0x48, 0xda, 0x87, 0x36, 0xe8, 0xf5, 0x90, 0xa7, 0x65, 0x67, 0x2b, 0x05, 0x69, 0x2e, 0x14, + 0xa4, 0xb1, 0xaf, 0xaf, 0xb6, 0xa0, 0x1c, 0x59, 0x0f, 0x7a, 0x12, 0x4a, 0x63, 0x72, 0x2e, 0x3b, + 
0x59, 0xa2, 0x17, 0x51, 0x1c, 0x93, 0x73, 0xd1, 0xc4, 0x7a, 0x1c, 0x0a, 0xec, 0xe1, 0x80, 0x88, + 0x20, 0x67, 0x70, 0x7e, 0x4c, 0xce, 0xdf, 0x25, 0xae, 0xfa, 0x21, 0x54, 0xe3, 0x5d, 0x1c, 0xb6, + 0xd9, 0x1d, 0x6b, 0x62, 0xea, 0xdc, 0x47, 0x0e, 0x8b, 0x09, 0x7a, 0x15, 0x72, 0x67, 0x96, 0xc0, + 0xc3, 0xf9, 0xa8, 0x70, 0xdf, 0xf2, 0x68, 0xa4, 0x0b, 0x24, 0xb4, 0xd5, 0xcf, 0x20, 0xc7, 0xf1, + 0x8d, 0x61, 0x15, 0xef, 0xc7, 0x48, 0x8e, 0xca, 0xc6, 0xe8, 0x43, 0x00, 0xe2, 0x79, 0x8e, 0xd1, + 0x9b, 0x84, 0x8e, 0xb7, 0xe6, 0xe3, 0xe3, 0x9e, 0xaf, 0xd7, 0xbc, 0x2e, 0x81, 0xf2, 0x6a, 0x68, + 0x1a, 0x01, 0xcb, 0x88, 0x43, 0xf5, 0x10, 0xaa, 0x71, 0x5b, 0x9f, 0x56, 0x29, 0x73, 0x68, 0x55, + 0x3a, 0x4a, 0xab, 0x02, 0x52, 0x96, 0x11, 0xbd, 0x37, 0x3e, 0x51, 0x3f, 0x57, 0xa0, 0xd8, 0x3d, + 0x97, 0x27, 0x27, 0xa1, 0xed, 0x13, 0x9a, 0xa6, 0xa3, 0x4d, 0x0e, 0xd1, 0x47, 0xca, 0x04, 0xdd, + 0xa9, 0xb7, 0x03, 0x6c, 0xc8, 0xae, 0x5a, 0xa1, 0xfa, 0x6d, 0x3a, 0x89, 0x87, 0x6f, 0x40, 0x29, + 0xd8, 0x55, 0x8c, 0xec, 0x13, 0x5d, 0x77, 0xa8, 0xeb, 0xca, 0xb5, 0xf9, 0x53, 0xde, 0x45, 0xb4, + 0x3e, 0x95, 0x6d, 0x94, 0x0c, 0x16, 0x13, 0x55, 0x87, 0xf5, 0xa9, 0xcc, 0x88, 0xde, 0x80, 0x82, + 0x3d, 0xe9, 0x69, 0x7e, 0x78, 0xa6, 0x0e, 0x8f, 0xcf, 0x23, 0x27, 0xbd, 0x91, 0xd1, 0xbf, 0x4b, + 0x2f, 0xfc, 0x97, 0xb1, 0x27, 0xbd, 0xbb, 0x22, 0x8a, 0xe2, 0x57, 0xd2, 0xd1, 0x5f, 0x39, 0x83, + 0xa2, 0xbf, 0x29, 0xd0, 0x8f, 0xa2, 0xe7, 0x44, 0x99, 0x3d, 0xe6, 0xf1, 0x6c, 0x2d, 0xdd, 0x47, + 0x8e, 0xc9, 0x2d, 0xd8, 0x70, 0x8d, 0x81, 0x49, 0x75, 0x2d, 0x2c, 0x37, 0xf8, 0xaf, 0x15, 0xf1, + 0xba, 0x78, 0x70, 0xe0, 0xd7, 0x1a, 0xea, 0xbf, 0x14, 0x28, 0xfa, 0x07, 0x16, 0xbd, 0x1c, 0xd9, + 0x77, 0xd5, 0x39, 0x8d, 0x14, 0x5f, 0x31, 0x6c, 0x04, 0xc6, 0xdf, 0x35, 0x7d, 0xf9, 0x77, 0x4d, + 0xea, 0xe8, 0xfa, 0xad, 0xf5, 0xec, 0xa5, 0x5b, 0xeb, 0x2f, 0x00, 0xf2, 0x2c, 0x8f, 0x8c, 0xb4, + 0x33, 0xcb, 0x33, 0xcc, 0x81, 0x26, 0x82, 0x2d, 0x48, 0x5b, 0x8d, 0x3f, 0xb9, 0xcf, 0x1f, 0x1c, + 0xf3, 0xb8, 0xff, 0x52, 0x81, 0x62, 
0x90, 0x7e, 0x2f, 0xdb, 0xd7, 0xbb, 0x06, 0x79, 0x99, 0x61, + 0x44, 0x63, 0x4f, 0xce, 0x82, 0x16, 0x73, 0x36, 0xd2, 0x62, 0x6e, 0x30, 0xe8, 0xf6, 0x08, 0xe7, + 0x20, 0xa2, 0xe2, 0x0b, 0xe6, 0xb7, 0x5e, 0x87, 0x72, 0xa4, 0xc5, 0xca, 0x4e, 0xde, 0x61, 0xfb, + 0x83, 0x5a, 0xaa, 0x51, 0xf8, 0xfc, 0xcb, 0x1b, 0x99, 0x43, 0xfa, 0x29, 0xdb, 0xb3, 0xb8, 0xdd, + 0xea, 0xb4, 0x5b, 0x77, 0x6b, 0x4a, 0xa3, 0xfc, 0xf9, 0x97, 0x37, 0x0a, 0x98, 0xf2, 0x26, 0xce, + 0xad, 0x0e, 0xac, 0x45, 0xbf, 0x4a, 0x3c, 0x49, 0x21, 0xa8, 0xbe, 0x73, 0xef, 0xf8, 0x60, 0xbf, + 0xb5, 0xd7, 0x6d, 0x6b, 0xf7, 0x8f, 0xba, 0xed, 0x9a, 0x82, 0x1e, 0x87, 0x2b, 0x07, 0xfb, 0xef, + 0x76, 0xba, 0x5a, 0xeb, 0x60, 0xbf, 0x7d, 0xd8, 0xd5, 0xf6, 0xba, 0xdd, 0xbd, 0xd6, 0xdd, 0x5a, + 0x7a, 0xf7, 0x77, 0x00, 0xeb, 0x7b, 0xcd, 0xd6, 0x3e, 0x4b, 0xb0, 0x46, 0x9f, 0xf0, 0x72, 0xbc, + 0x05, 0x59, 0x5e, 0x70, 0x2f, 0xbc, 0xcf, 0x6d, 0x2c, 0x6e, 0xef, 0xa1, 0x3b, 0x90, 0xe3, 0xb5, + 0x38, 0x5a, 0x7c, 0xc1, 0xdb, 0x58, 0xd2, 0xef, 0x63, 0x2f, 0xc3, 0x8f, 0xc7, 0xc2, 0x1b, 0xdf, + 0xc6, 0xe2, 0xf6, 0x1f, 0xc2, 0x50, 0x0a, 0xab, 0x84, 0xe5, 0x37, 0xa0, 0x8d, 0x15, 0xc0, 0x06, + 0x1d, 0x40, 0xc1, 0xaf, 0xbf, 0x96, 0xdd, 0xc9, 0x36, 0x96, 0xf6, 0xe7, 0x58, 0xb8, 0x44, 0x9d, + 0xbc, 0xf8, 0x82, 0xb9, 0xb1, 0xa4, 0xd9, 0x88, 0xf6, 0x21, 0x2f, 0xa9, 0xef, 0x92, 0x7b, 0xd6, + 0xc6, 0xb2, 0x7e, 0x1b, 0x0b, 0x5a, 0xd8, 0x80, 0x58, 0x7e, 0x6d, 0xde, 0x58, 0xa1, 0x8f, 0x8a, + 0xee, 0x01, 0x44, 0xaa, 0xe2, 0x15, 0xee, 0xc3, 0x1b, 0xab, 0xf4, 0x47, 0xd1, 0x11, 0x14, 0x83, + 0xea, 0x67, 0xe9, 0xed, 0x74, 0x63, 0x79, 0xa3, 0x12, 0x3d, 0x80, 0x4a, 0x9c, 0xf6, 0xaf, 0x76, + 0xe7, 0xdc, 0x58, 0xb1, 0x03, 0xc9, 0xfc, 0xc7, 0x6b, 0x80, 0xd5, 0xee, 0xa0, 0x1b, 0x2b, 0x36, + 0x24, 0xd1, 0xc7, 0xb0, 0x31, 0xcb, 0xd1, 0x57, 0xbf, 0x92, 0x6e, 0x5c, 0xa2, 0x45, 0x89, 0xc6, + 0x80, 0xe6, 0x70, 0xfb, 0x4b, 0xdc, 0x50, 0x37, 0x2e, 0xd3, 0xb1, 0x64, 0xa1, 0x8b, 0x13, 0xe6, + 0xd5, 0x6e, 0xac, 0x1b, 0x2b, 0xf6, 0x2e, 0x9b, 0xef, 0x7d, 0xf5, 0xed, 
0xa6, 0xf2, 0xf5, 0xb7, + 0x9b, 0xca, 0xdf, 0xbe, 0xdd, 0x54, 0xbe, 0x78, 0xb4, 0x99, 0xfa, 0xfa, 0xd1, 0x66, 0xea, 0x2f, + 0x8f, 0x36, 0x53, 0x3f, 0x7b, 0x69, 0x60, 0x78, 0xc3, 0x49, 0x6f, 0xbb, 0x6f, 0x8d, 0x77, 0x46, + 0xe4, 0xb3, 0x8b, 0x11, 0xd5, 0x07, 0xd4, 0x89, 0x0c, 0x5f, 0xec, 0x5b, 0x0e, 0x8d, 0xfc, 0xe7, + 0xa7, 0x97, 0xe7, 0x99, 0xeb, 0x95, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb0, 0xf7, 0x58, 0xc2, + 0x13, 0x24, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -4361,6 +4371,13 @@ func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.AbciVersion) > 0 { + i -= len(m.AbciVersion) + copy(dAtA[i:], m.AbciVersion) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AbciVersion))) + i-- + dAtA[i] = 0x22 + } if m.P2PVersion != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) i-- @@ -6755,6 +6772,10 @@ func (m *RequestInfo) Size() (n int) { if m.P2PVersion != 0 { n += 1 + sovTypes(uint64(m.P2PVersion)) } + l = len(m.AbciVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -8547,6 +8568,38 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbciVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AbciVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/appveyor.yml b/appveyor.yml deleted file 
mode 100644 index 4aa8c2abb4..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 1.0.{build} -configuration: Release -platform: -- x64 -- x86 -clone_folder: c:\go\path\src\github.com\tendermint\tendermint -before_build: -- cmd: set GOPATH=%GOROOT%\path -- cmd: set PATH=%GOPATH%\bin;%PATH% -build_script: -- cmd: make test -test: off diff --git a/blockchain/doc.go b/blockchain/doc.go new file mode 100644 index 0000000000..b5d9858d0a --- /dev/null +++ b/blockchain/doc.go @@ -0,0 +1,17 @@ +/* +Package blockchain provides two implementations of the fast-sync protocol. + +- v0 was the very first implementation. it's battle tested, but does not have a +lot of test coverage. +- v2 is the newest implementation, with a focus on testability and readability. + +Check out ADR-40 for the formal model and requirements. + +# Termination criteria + +1. the maximum peer height is reached +2. termination timeout is triggered, which is set if the peer set is empty or +there are no pending requests. + +*/ +package blockchain diff --git a/blockchain/v0/pool.go b/blockchain/v0/pool.go index 68e393bd20..0318f317f6 100644 --- a/blockchain/v0/pool.go +++ b/blockchain/v0/pool.go @@ -58,12 +58,19 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests are not at peer limits, we can probably switch to consensus reactor */ +// BlockRequest stores a block request identified by the block Height and the +// PeerID responsible for delivering the block. +type BlockRequest struct { + Height int64 + PeerID p2p.ID +} + // BlockPool keeps track of the fast sync peers, block requests and block responses. type BlockPool struct { service.BaseService - startTime time.Time + lastAdvance time.Time - mtx tmsync.Mutex + mtx tmsync.RWMutex // block requests requesters map[int64]*bpRequester height int64 // the lowest key in requesters. 
@@ -98,8 +105,8 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p // OnStart implements service.Service by spawning requesters routine and recording // pool's start time. func (pool *BlockPool) OnStart() error { + pool.lastAdvance = time.Now() go pool.makeRequestersRoutine() - pool.startTime = time.Now() return nil } @@ -134,6 +141,7 @@ func (pool *BlockPool) removeTimedoutPeers() { defer pool.mtx.Unlock() for _, peer := range pool.peers { + // check if peer timed out if !peer.didTimeout && peer.numPending > 0 { curRate := peer.recvMonitor.Status().CurRate // curRate can be 0 on start @@ -147,6 +155,7 @@ func (pool *BlockPool) removeTimedoutPeers() { peer.didTimeout = true } } + if peer.didTimeout { pool.removePeer(peer.id) } @@ -156,33 +165,24 @@ func (pool *BlockPool) removeTimedoutPeers() { // GetStatus returns pool's height, numPending requests and the number of // requesters. func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) { - pool.mtx.Lock() - defer pool.mtx.Unlock() + pool.mtx.RLock() + defer pool.mtx.RUnlock() return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters) } // IsCaughtUp returns true if this node is caught up, false - otherwise. -// TODO: relax conditions, prevent abuse. func (pool *BlockPool) IsCaughtUp() bool { - pool.mtx.Lock() - defer pool.mtx.Unlock() + pool.mtx.RLock() + defer pool.mtx.RUnlock() // Need at least 1 peer to be considered caught up. if len(pool.peers) == 0 { - pool.Logger.Debug("Blockpool has no peers") return false } - - // Some conditions to determine if we're caught up. - // Ensures we've either received a block or waited some amount of time, - // and that we're synced to the highest known height. - // Note we use maxPeerHeight - 1 because to sync block H requires block H+1 + // NOTE: we use maxPeerHeight - 1 because to sync block H requires block H+1 // to verify the LastCommit. 
- receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second - ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1) - isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers - return isCaughtUp + return pool.height >= (pool.maxPeerHeight - 1) } // PeekTwoBlocks returns blocks at pool.height and pool.height+1. @@ -190,8 +190,8 @@ func (pool *BlockPool) IsCaughtUp() bool { // So we peek two blocks at a time. // The caller will verify the commit. func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { - pool.mtx.Lock() - defer pool.mtx.Unlock() + pool.mtx.RLock() + defer pool.mtx.RUnlock() if r := pool.requesters[pool.height]; r != nil { first = r.getBlock() @@ -209,16 +209,12 @@ func (pool *BlockPool) PopRequest() { defer pool.mtx.Unlock() if r := pool.requesters[pool.height]; r != nil { - /* The block can disappear at any time, due to removePeer(). - if r := pool.requesters[pool.height]; r == nil || r.block == nil { - PanicSanity("PopRequest() requires a valid block") - } - */ if err := r.Stop(); err != nil { pool.Logger.Error("Error stopping requester", "err", err) } delete(pool.requesters, pool.height) pool.height++ + pool.lastAdvance = time.Now() } else { panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height)) } @@ -248,14 +244,8 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int requester := pool.requesters[block.Height] if requester == nil { - pool.Logger.Info( - "peer sent us a block we didn't expect", - "peer", - peerID, - "curHeight", - pool.height, - "blockHeight", - block.Height) + pool.Logger.Error("peer sent us a block we didn't expect", + "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height) diff := pool.height - block.Height if diff < 0 { diff *= -1 @@ -273,18 +263,27 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int 
peer.decrPending(blockSize) } } else { - pool.Logger.Info("invalid peer", "peer", peerID, "blockHeight", block.Height) - pool.sendError(errors.New("invalid peer"), peerID) + err := errors.New("requester is different or block already exists") + pool.Logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height) + pool.sendError(err, peerID) } } // MaxPeerHeight returns the highest reported height. func (pool *BlockPool) MaxPeerHeight() int64 { - pool.mtx.Lock() - defer pool.mtx.Unlock() + pool.mtx.RLock() + defer pool.mtx.RUnlock() return pool.maxPeerHeight } +// LastAdvance returns the time when the last block was processed (or start +// time if no blocks were processed). +func (pool *BlockPool) LastAdvance() time.Time { + pool.mtx.RLock() + defer pool.mtx.RUnlock() + return pool.lastAdvance +} + // SetPeerRange sets the peer's alleged blockchain base and height. func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) { pool.mtx.Lock() @@ -601,7 +600,6 @@ OUTER_LOOP: } peer = bpr.pool.pickIncrAvailablePeer(bpr.height) if peer == nil { - // log.Info("No peers available", "height", height) time.Sleep(requestIntervalMS * time.Millisecond) continue PICK_PEER_LOOP } @@ -638,10 +636,3 @@ OUTER_LOOP: } } } - -// BlockRequest stores a block request identified by the block Height and the PeerID responsible for -// delivering the block -type BlockRequest struct { - Height int64 - PeerID p2p.ID -} diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index bdb24a3d1e..598ed701bf 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -28,6 +28,9 @@ const ( statusUpdateIntervalSeconds = 10 // check if we should switch to consensus reactor switchToConsensusIntervalSeconds = 1 + + // switch to consensus after this duration of inactivity + syncTimeout = 60 * time.Second ) type consensusReactor interface { @@ -158,7 +161,7 @@ func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { return } - 
peer.Send(BlockchainChannel, msgBytes) + _ = peer.Send(BlockchainChannel, msgBytes) // it's OK if send fails. will try later in poolRoutine // peer is added to the pool once we receive the first @@ -204,21 +207,25 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest, } // Receive implements Reactor by handling 4 types of messages (look below). +// XXX: do not call any methods that can block or incur heavy processing. +// https://github.com/tendermint/tendermint/issues/2888 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { + logger := bcR.Logger.With("src", src, "chId", chID) + msg, err := bc.DecodeMsg(msgBytes) if err != nil { - bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + logger.Error("Error decoding message", "err", err) bcR.Switch.StopPeerForError(src, err) return } if err = bc.ValidateMsg(msg); err != nil { - bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + logger.Error("Peer sent us invalid msg", "msg", msg, "err", err) bcR.Switch.StopPeerForError(src, err) return } - bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) + logger.Debug("Receive", "msg", msg) switch msg := msg.(type) { case *bcproto.BlockRequest: @@ -226,7 +233,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) case *bcproto.BlockResponse: bi, err := types.BlockFromProto(msg.Block) if err != nil { - bcR.Logger.Error("Block content is invalid", "err", err) + logger.Error("Block content is invalid", "err", err) bcR.Switch.StopPeerForError(src, err) return } @@ -238,7 +245,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) Base: bcR.store.Base(), }) if err != nil { - bcR.Logger.Error("could not convert msg to protobut", "err", err) + logger.Error("could not convert msg to protobut", "err", err) return } src.TrySend(BlockchainChannel, msgBytes) @@ -246,45 
+253,50 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // Got a peer status. Unverified. bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height) case *bcproto.NoBlockResponse: - bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) + logger.Debug("Peer does not have requested block", "height", msg.Height) default: - bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } } // Handle messages from the poolReactor telling the reactor what to do. // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { + var ( + trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond) + statusUpdateTicker = time.NewTicker(statusUpdateIntervalSeconds * time.Second) + switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second) - trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond) - statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) - switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second) - - blocksSynced := uint64(0) + blocksSynced = uint64(0) - chainID := bcR.initialState.ChainID - state := bcR.initialState + chainID = bcR.initialState.ChainID + state = bcR.initialState - lastHundred := time.Now() - lastRate := 0.0 + lastHundred = time.Now() + lastRate = 0.0 - didProcessCh := make(chan struct{}, 1) + didProcessCh = make(chan struct{}, 1) + ) go func() { for { select { + case <-bcR.Quit(): return + case <-bcR.pool.Quit(): return + case request := <-bcR.requestsCh: peer := bcR.Switch.Peers().Get(request.PeerID) if peer == nil { + bcR.Logger.Debug("Can't send request: no peer", "peer_id", request.PeerID) continue } msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height}) if err != nil { - bcR.Logger.Error("could 
not convert msg to proto", "err", err) + bcR.Logger.Error("could not convert BlockRequest to proto", "err", err) continue } @@ -292,6 +304,7 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { if !queued { bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height) } + case err := <-bcR.errorsCh: peer := bcR.Switch.Peers().Get(err.peerID) if peer != nil { @@ -300,8 +313,7 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { case <-statusUpdateTicker.C: // ask for status updates - go bcR.BroadcastStatusRequest() // nolint: errcheck - + go bcR.BroadcastStatusRequest() } } }() @@ -309,27 +321,41 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { FOR_LOOP: for { select { + case <-switchToConsensusTicker.C: - height, numPending, lenRequesters := bcR.pool.GetStatus() - outbound, inbound, _ := bcR.Switch.NumPeers() - bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters, - "outbound", outbound, "inbound", inbound) - if bcR.pool.IsCaughtUp() { + var ( + height, numPending, lenRequesters = bcR.pool.GetStatus() + outbound, inbound, _ = bcR.Switch.NumPeers() + lastAdvance = bcR.pool.LastAdvance() + ) + + bcR.Logger.Debug("Consensus ticker", + "numPending", numPending, + "total", lenRequesters) + + switch { + case bcR.pool.IsCaughtUp(): bcR.Logger.Info("Time to switch to consensus reactor!", "height", height) - if err := bcR.pool.Stop(); err != nil { - bcR.Logger.Error("Error stopping pool", "err", err) - } - conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) - if ok { - conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) - } - // else { - // should only happen during testing - // } + case time.Since(lastAdvance) > syncTimeout: + bcR.Logger.Error(fmt.Sprintf("No progress since last advance: %v", lastAdvance)) + default: + bcR.Logger.Info("Not caught up yet", + "height", height, "max_peer_height", bcR.pool.MaxPeerHeight(), + "num_peers", 
outbound+inbound, + "timeout_in", syncTimeout-time.Since(lastAdvance)) + continue + } - break FOR_LOOP + if err := bcR.pool.Stop(); err != nil { + bcR.Logger.Error("Error stopping pool", "err", err) + } + conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) + if ok { + conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) } + break FOR_LOOP + case <-trySyncTicker.C: // chan time select { case didProcessCh <- struct{}{}: @@ -356,31 +382,37 @@ FOR_LOOP: didProcessCh <- struct{}{} } - firstParts := first.MakePartSet(types.BlockPartSizeBytes) - firstPartSetHeader := firstParts.Header() - firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} + var ( + firstParts = first.MakePartSet(types.BlockPartSizeBytes) + firstPartSetHeader = firstParts.Header() + firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} + ) + // Finally, verify the first block using the second's commit // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err := state.Validators.VerifyCommitLight( - chainID, firstID, first.Height, second.LastCommit) + err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit) if err != nil { - bcR.Logger.Error("Error in validation", "err", err) + err = fmt.Errorf("invalid last commit: %w", err) + bcR.Logger.Error(err.Error(), + "last_commit", second.LastCommit, "block_id", firstID, "height", first.Height) + peerID := bcR.pool.RedoRequest(first.Height) peer := bcR.Switch.Peers().Get(peerID) if peer != nil { - // NOTE: we've already removed the peer's request, but we - // still need to clean up the rest. - bcR.Switch.StopPeerForError(peer, fmt.Errorf("blockchainReactor validation error: %v", err)) + // NOTE: we've already removed the peer's request, but we still need + // to clean up the rest. 
+ bcR.Switch.StopPeerForError(peer, err) } + peerID2 := bcR.pool.RedoRequest(second.Height) - peer2 := bcR.Switch.Peers().Get(peerID2) - if peer2 != nil && peer2 != peer { - // NOTE: we've already removed the peer's request, but we - // still need to clean up the rest. - bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err)) + if peerID2 != peerID { + if peer2 := bcR.Switch.Peers().Get(peerID2); peer2 != nil { + bcR.Switch.StopPeerForError(peer2, err) + } } + continue FOR_LOOP } else { bcR.pool.PopRequest() @@ -388,8 +420,8 @@ FOR_LOOP: // TODO: batch saves so we dont persist to disk every block bcR.store.SaveBlock(first, firstParts, second.LastCommit) - // TODO: same thing for app - but we would need a way to - // get the hash without persisting the state + // TODO: same thing for app - but we would need a way to get the hash + // without persisting the state. var err error state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first) if err != nil { @@ -400,8 +432,8 @@ FOR_LOOP: if blocksSynced%100 == 0 { lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height, - "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate) + bcR.Logger.Info("Fast Sync Rate", + "height", bcR.pool.height, "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate) lastHundred = time.Now() } } @@ -414,14 +446,13 @@ FOR_LOOP: } // BroadcastStatusRequest broadcasts `BlockStore` base and height. 
-func (bcR *BlockchainReactor) BroadcastStatusRequest() error { +func (bcR *BlockchainReactor) BroadcastStatusRequest() { bm, err := bc.EncodeMsg(&bcproto.StatusRequest{}) if err != nil { - bcR.Logger.Error("could not convert msg to proto", "err", err) - return fmt.Errorf("could not convert msg to proto: %w", err) + bcR.Logger.Error("could not convert StatusRequest to proto", "err", err) + return } - bcR.Switch.Broadcast(BlockchainChannel, bm) - - return nil + // We don't care about whenever broadcast is successful or not. + _ = bcR.Switch.Broadcast(BlockchainChannel, bm) } diff --git a/blockchain/v2/io.go b/blockchain/v2/io.go index c9a92aafe6..2251ca0a3d 100644 --- a/blockchain/v2/io.go +++ b/blockchain/v2/io.go @@ -1,7 +1,7 @@ package v2 import ( - "fmt" + "errors" bc "github.com/lazyledger/lazyledger-core/blockchain" "github.com/lazyledger/lazyledger-core/p2p" @@ -10,12 +10,17 @@ import ( "github.com/lazyledger/lazyledger-core/types" ) +var ( + errPeerQueueFull = errors.New("peer queue full") +) + type iIO interface { - sendBlockRequest(peerID p2p.ID, height int64) error - sendBlockToPeer(block *types.Block, peerID p2p.ID) error - sendBlockNotFound(height int64, peerID p2p.ID) error - sendStatusResponse(base, height int64, peerID p2p.ID) error + sendBlockRequest(peer p2p.Peer, height int64) error + sendBlockToPeer(block *types.Block, peer p2p.Peer) error + sendBlockNotFound(height int64, peer p2p.Peer) error + sendStatusResponse(base, height int64, peer p2p.Peer) error + sendStatusRequest(peer p2p.Peer) error broadcastStatusRequest() error trySwitchToConsensus(state state.State, skipWAL bool) bool @@ -42,11 +47,7 @@ type consensusReactor interface { SwitchToConsensus(state state.State, skipWAL bool) } -func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error { - peer := sio.sw.Peers().Get(peerID) - if peer == nil { - return fmt.Errorf("peer not found") - } +func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height 
int64) error { msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height}) if err != nil { return err @@ -54,34 +55,25 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error { queued := peer.TrySend(BlockchainChannel, msgBytes) if !queued { - return fmt.Errorf("send queue full") + return errPeerQueueFull } return nil } -func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID) error { - peer := sio.sw.Peers().Get(peerID) - if peer == nil { - return fmt.Errorf("peer not found") - } - +func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error { msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base}) if err != nil { return err } if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return fmt.Errorf("peer queue full") + return errPeerQueueFull } return nil } -func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error { - peer := sio.sw.Peers().Get(peerID) - if peer == nil { - return fmt.Errorf("peer not found") - } +func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error { if block == nil { panic("trying to send nil block") } @@ -96,24 +88,20 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error { return err } if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return fmt.Errorf("peer queue full") + return errPeerQueueFull } return nil } -func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error { - peer := sio.sw.Peers().Get(peerID) - if peer == nil { - return fmt.Errorf("peer not found") - } +func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error { msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height}) if err != nil { return err } if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return fmt.Errorf("peer queue full") + return errPeerQueueFull } return nil @@ -127,6 +115,19 @@ func (sio 
*switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool return ok } +func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error { + msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) + if err != nil { + return err + } + + if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { + return errPeerQueueFull + } + + return nil +} + func (sio *switchIO) broadcastStatusRequest() error { msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) if err != nil { diff --git a/blockchain/v2/processor.go b/blockchain/v2/processor.go index 195a5fa182..0e45c70dbd 100644 --- a/blockchain/v2/processor.go +++ b/blockchain/v2/processor.go @@ -17,6 +17,11 @@ type pcBlockVerificationFailure struct { secondPeerID p2p.ID } +func (e pcBlockVerificationFailure) String() string { + return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}", + e.height, e.firstPeerID, e.secondPeerID) +} + // successful block execution type pcBlockProcessed struct { priorityNormal @@ -24,6 +29,10 @@ type pcBlockProcessed struct { peerID p2p.ID } +func (e pcBlockProcessed) String() string { + return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID) +} + // processor has finished type pcFinished struct { priorityNormal @@ -87,9 +96,12 @@ func (state *pcState) synced() bool { } func (state *pcState) enqueue(peerID p2p.ID, block *types.Block, height int64) { - if _, ok := state.queue[height]; ok { - panic("duplicate block enqueued by processor") + if item, ok := state.queue[height]; ok { + panic(fmt.Sprintf( + "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", + height, block.Hash(), peerID, item.block.Hash(), item.peerID)) } + state.queue[height] = queueItem{block: block, peerID: peerID} } @@ -145,16 +157,20 @@ func (state *pcState) handle(event Event) (Event, error) { } return noOp, nil } - first, second := firstItem.block, secondItem.block - firstParts := first.MakePartSet(types.BlockPartSizeBytes) - 
firstPartSetHeader := firstParts.Header() - firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} + var ( + first, second = firstItem.block, secondItem.block + firstParts = first.MakePartSet(types.BlockPartSizeBytes) + firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} + ) + // verify if +second+ last commit "confirms" +first+ block err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit) if err != nil { state.purgePeer(firstItem.peerID) - state.purgePeer(secondItem.peerID) + if firstItem.peerID != secondItem.peerID { + state.purgePeer(secondItem.peerID) + } return pcBlockVerificationFailure{ height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID}, nil @@ -170,7 +186,6 @@ func (state *pcState) handle(event Event) (Event, error) { state.blocksSynced++ return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil - } return noOp, nil diff --git a/blockchain/v2/reactor.go b/blockchain/v2/reactor.go index 1f6da4c34e..9b5115d419 100644 --- a/blockchain/v2/reactor.go +++ b/blockchain/v2/reactor.go @@ -47,11 +47,6 @@ type BlockchainReactor struct { store blockStore } -//nolint:unused,deadcode -type blockVerifier interface { - VerifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error -} - type blockApplier interface { ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error) } @@ -187,7 +182,7 @@ type rTryPrunePeer struct { } func (e rTryPrunePeer) String() string { - return fmt.Sprintf(": %v", e.time) + return fmt.Sprintf("rTryPrunePeer{%v}", e.time) } // ticker event for scheduling block requests @@ -197,7 +192,7 @@ type rTrySchedule struct { } func (e rTrySchedule) String() string { - return fmt.Sprintf(": %v", e.time) + return fmt.Sprintf("rTrySchedule{%v}", e.time) } // ticker for block processing @@ -205,6 +200,10 @@ type rProcessBlock struct { 
priorityNormal } +func (e rProcessBlock) String() string { + return "rProcessBlock" +} + // reactor generated events based on blockchain related messages from peers: // blockResponse message received from a peer type bcBlockResponse struct { @@ -215,6 +214,11 @@ type bcBlockResponse struct { block *types.Block } +func (resp bcBlockResponse) String() string { + return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}", + resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time) +} + // blockNoResponse message received from a peer type bcNoBlockResponse struct { priorityNormal @@ -223,6 +227,11 @@ type bcNoBlockResponse struct { height int64 } +func (resp bcNoBlockResponse) String() string { + return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}", + resp.peerID, resp.height, resp.time) +} + // statusResponse message received from a peer type bcStatusResponse struct { priorityNormal @@ -232,12 +241,21 @@ type bcStatusResponse struct { height int64 } +func (resp bcStatusResponse) String() string { + return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}", + resp.peerID, resp.height, resp.base, resp.time) +} + // new peer is connected type bcAddNewPeer struct { priorityNormal peerID p2p.ID } +func (resp bcAddNewPeer) String() string { + return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID) +} + // existing peer is removed type bcRemovePeer struct { priorityHigh @@ -245,12 +263,20 @@ type bcRemovePeer struct { reason interface{} } +func (resp bcRemovePeer) String() string { + return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason) +} + // resets the scheduler and processor state, e.g. following a switch from state syncing type bcResetState struct { priorityHigh state state.State } +func (e bcResetState) String() string { + return fmt.Sprintf("bcResetState{%v}", e.state) +} + // Takes the channel as a parameter to avoid race conditions on r.events. 
func (r *BlockchainReactor) demux(events <-chan Event) { var lastRate = 0.0 @@ -285,6 +311,9 @@ func (r *BlockchainReactor) demux(events <-chan Event) { defer doStatusTk.Stop() doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers + // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq. + var scSchedulerFailErr error + // XXX: Extract timers to make testing atemporal for { select { @@ -349,15 +378,27 @@ func (r *BlockchainReactor) demux(events <-chan Event) { r.logger.Error("Error reporting peer", "err", err) } case scBlockRequest: - if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil { + peer := r.Switch.Peers().Get(event.peerID) + if peer == nil { + r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID) + continue + } + if err := r.io.sendBlockRequest(peer, event.height); err != nil { r.logger.Error("Error sending block request", "err", err) } case scFinishedEv: r.processor.send(event) r.scheduler.stop() case scSchedulerFail: - r.logger.Error("Scheduler failure", "err", event.reason.Error()) + if scSchedulerFailErr != event.reason { + r.logger.Error("Scheduler failure", "err", event.reason.Error()) + scSchedulerFailErr = event.reason + } case scPeersPruned: + // Remove peers from the processor. + for _, peerID := range event.peers { + r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")}) + } r.logger.Debug("Pruned peers", "count", len(event.peers)) case noOpEvent: default: @@ -420,40 +461,42 @@ func (r *BlockchainReactor) Stop() error { } // Receive implements Reactor by handling different message types. +// XXX: do not call any methods that can block or incur heavy processing. 
+// https://github.com/tendermint/tendermint/issues/2888 func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { + logger := r.logger.With("src", src.ID(), "chID", chID) + msg, err := bc.DecodeMsg(msgBytes) if err != nil { - r.logger.Error("error decoding message", - "src", src.ID(), "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + logger.Error("error decoding message", "err", err) _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) return } if err = bc.ValidateMsg(msg); err != nil { - r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + logger.Error("peer sent us invalid msg", "msg", msg, "err", err) _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) return } - r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg) + r.logger.Debug("Receive", "msg", msg) switch msg := msg.(type) { case *bcproto.StatusRequest: - if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil { - r.logger.Error("Could not send status message to peer", "src", src) + if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil { + logger.Error("Could not send status message to src peer") } case *bcproto.BlockRequest: block := r.store.LoadBlock(msg.Height) if block != nil { - if err = r.io.sendBlockToPeer(block, src.ID()); err != nil { - r.logger.Error("Could not send block message to peer: ", err) + if err = r.io.sendBlockToPeer(block, src); err != nil { + logger.Error("Could not send block message to src peer", "err", err) } } else { - r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height) - peerID := src.ID() - if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil { - r.logger.Error("Couldn't send block not found: ", err) + logger.Info("peer asking for a block we don't have", "height", msg.Height) + if err = r.io.sendBlockNotFound(msg.Height, src); err != nil { + 
logger.Error("Couldn't send block not found msg", "err", err) } } @@ -468,7 +511,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { r.mtx.RLock() bi, err := types.BlockFromProto(msg.Block) if err != nil { - r.logger.Error("error transitioning block from protobuf", "err", err) + logger.Error("error transitioning block from protobuf", "err", err) _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) return } @@ -493,10 +536,16 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { // AddPeer implements Reactor interface func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { - err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID()) + err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer) + if err != nil { + r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err) + } + + err = r.io.sendStatusRequest(peer) if err != nil { - r.logger.Error("Could not send status message to peer new", "src", peer.ID, "height", r.SyncHeight()) + r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err) } + r.mtx.RLock() defer r.mtx.RUnlock() if r.events != nil { diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index a1ce2c1416..fb04b7e881 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -93,34 +93,37 @@ type mockSwitchIo struct { numStatusResponse int numBlockResponse int numNoBlockResponse int + numStatusRequest int } -func (sio *mockSwitchIo) sendBlockRequest(peerID p2p.ID, height int64) error { +var _ iIO = (*mockSwitchIo)(nil) + +func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error { return nil } -func (sio *mockSwitchIo) sendStatusResponse(base, height int64, peerID p2p.ID) error { +func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error { sio.mtx.Lock() defer sio.mtx.Unlock() sio.numStatusResponse++ return nil } -func 
(sio *mockSwitchIo) sendBlockToPeer(block *types.Block, peerID p2p.ID) error { +func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error { sio.mtx.Lock() defer sio.mtx.Unlock() sio.numBlockResponse++ return nil } -func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error { +func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error { sio.mtx.Lock() defer sio.mtx.Unlock() sio.numNoBlockResponse++ return nil } -func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool { +func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool { sio.mtx.Lock() defer sio.mtx.Unlock() sio.switchedToConsensus = true @@ -131,6 +134,13 @@ func (sio *mockSwitchIo) broadcastStatusRequest() error { return nil } +func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error { + sio.mtx.Lock() + defer sio.mtx.Unlock() + sio.numStatusRequest++ + return nil +} + type testReactorParams struct { logger log.Logger genDoc *types.GenesisDoc diff --git a/blockchain/v2/routine.go b/blockchain/v2/routine.go index 2d307a5920..b783ff6718 100644 --- a/blockchain/v2/routine.go +++ b/blockchain/v2/routine.go @@ -2,6 +2,7 @@ package v2 import ( "fmt" + "strings" "sync/atomic" "github.com/Workiva/go-datastructures/queue" @@ -11,6 +12,8 @@ import ( type handleFunc = func(event Event) (Event, error) +const historySize = 25 + // Routine is a structure that models a finite state machine as serialized // stream of events processed by a handle function. This Routine structure // handles the concurrency and messaging guarantees. 
Events are sent via @@ -21,6 +24,7 @@ type Routine struct { name string handle handleFunc queue *queue.PriorityQueue + history []Event out chan Event fin chan error rdy chan struct{} @@ -34,6 +38,7 @@ func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine { name: name, handle: handleFunc, queue: queue.NewPriorityQueue(bufferSize, true), + history: make([]Event, 0, historySize), out: make(chan Event, bufferSize), rdy: make(chan struct{}, 1), fin: make(chan error, 1), @@ -53,13 +58,24 @@ func (rt *Routine) setMetrics(metrics *Metrics) { } func (rt *Routine) start() { - rt.logger.Info(fmt.Sprintf("%s: run\n", rt.name)) + rt.logger.Info(fmt.Sprintf("%s: run", rt.name)) running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1)) if !running { panic(fmt.Sprintf("%s is already running", rt.name)) } close(rt.rdy) defer func() { + if r := recover(); r != nil { + var ( + b strings.Builder + j int + ) + for i := len(rt.history) - 1; i >= 0; i-- { + fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i]) + j++ + } + panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String())) + } stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0)) if !stopped { panic(fmt.Sprintf("%s is failed to stop", rt.name)) @@ -82,7 +98,19 @@ func (rt *Routine) start() { return } rt.metrics.EventsOut.With("routine", rt.name).Add(1) - rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v\n", rt.name, oEvent, oEvent)) + rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent)) + + // Skip rTrySchedule and rProcessBlock events as they clutter the history + // due to their frequency. 
+ switch events[0].(type) { + case rTrySchedule: + case rProcessBlock: + default: + rt.history = append(rt.history, events[0].(Event)) + if len(rt.history) > historySize { + rt.history = rt.history[1:] + } + } rt.out <- oEvent } @@ -97,7 +125,7 @@ func (rt *Routine) send(event Event) bool { err := rt.queue.Put(event) if err != nil { rt.metrics.EventsShed.With("routine", rt.name).Add(1) - rt.logger.Info(fmt.Sprintf("%s: send failed, queue was full/stopped \n", rt.name)) + rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name)) return false } @@ -122,7 +150,7 @@ func (rt *Routine) stop() { return } - rt.logger.Info(fmt.Sprintf("%s: stop\n", rt.name)) + rt.logger.Info(fmt.Sprintf("%s: stop", rt.name)) rt.queue.Dispose() // this should block until all queue items are free? } diff --git a/blockchain/v2/scheduler.go b/blockchain/v2/scheduler.go index b127ac7fa0..5986004fb5 100644 --- a/blockchain/v2/scheduler.go +++ b/blockchain/v2/scheduler.go @@ -2,6 +2,7 @@ package v2 import ( "bytes" + "errors" "fmt" "math" "sort" @@ -18,6 +19,10 @@ type scFinishedEv struct { reason string } +func (e scFinishedEv) String() string { + return fmt.Sprintf("scFinishedEv{%v}", e.reason) +} + // send a blockRequest message type scBlockRequest struct { priorityNormal @@ -25,6 +30,10 @@ type scBlockRequest struct { height int64 } +func (e scBlockRequest) String() string { + return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID) +} + // a block has been received and validated by the scheduler type scBlockReceived struct { priorityNormal @@ -32,6 +41,10 @@ type scBlockReceived struct { block *types.Block } +func (e scBlockReceived) String() string { + return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID) +} + // scheduler detected a peer error type scPeerError struct { priorityHigh @@ -40,7 +53,7 @@ type scPeerError struct { } func (e scPeerError) String() string { - return fmt.Sprintf("scPeerError - peerID %s, err 
%s", e.peerID, e.reason) + return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason) } // scheduler removed a set of peers (timed out or slow peer) @@ -49,6 +62,10 @@ type scPeersPruned struct { peers []p2p.ID } +func (e scPeersPruned) String() string { + return fmt.Sprintf("scPeersPruned{%v}", e.peers) +} + // XXX: make this fatal? // scheduler encountered a fatal error type scSchedulerFail struct { @@ -56,6 +73,10 @@ type scSchedulerFail struct { reason error } +func (e scSchedulerFail) String() string { + return fmt.Sprintf("scSchedulerFail{%v}", e.reason) +} + type blockState int const ( @@ -217,14 +238,13 @@ func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error { return nil } -func (sc *scheduler) removePeer(peerID p2p.ID) error { +func (sc *scheduler) removePeer(peerID p2p.ID) { peer, ok := sc.peers[peerID] if !ok { - return nil + return } - if peer.state == peerStateRemoved { - return fmt.Errorf("tried to remove peer %s in peerStateRemoved", peerID) + return } for height, pendingPeerID := range sc.pendingBlocks { @@ -258,8 +278,6 @@ func (sc *scheduler) removePeer(peerID p2p.ID) error { delete(sc.blockStates, h) } } - - return nil } // check if the blockPool is running low and add new blocks in New state to be requested. @@ -284,17 +302,16 @@ func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error peer := sc.ensurePeer(peerID) if peer.state == peerStateRemoved { - return fmt.Errorf("cannot set peer height for a peer in peerStateRemoved") + return nil // noop } if height < peer.height { - if err := sc.removePeer(peerID); err != nil { - return err - } + sc.removePeer(peerID) return fmt.Errorf("cannot move peer height lower. 
from %d to %d", peer.height, height) } if base > height { + sc.removePeer(peerID) return fmt.Errorf("cannot set peer base higher than its height") } @@ -348,15 +365,9 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) { sc.blockStates[height] = state } +// CONTRACT: peer exists and in Ready state. func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error { - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("received block from unknown peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("cannot receive blocks from not ready peer %s", peerID) - } + peer := sc.peers[peerID] if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { return fmt.Errorf("received block %d from peer %s without being requested", height, peerID) @@ -412,17 +423,17 @@ func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) er } func (sc *scheduler) markProcessed(height int64) error { + // It is possible that a peer error or timeout is handled after the processor + // has processed the block but before the scheduler received this event, so + // when pcBlockProcessed event is received, the block had been requested + // again => don't check the block state. 
sc.lastAdvance = time.Now() - state := sc.getStateAtHeight(height) - if state != blockStateReceived { - return fmt.Errorf("cannot mark height %d received from block state %s", height, state) - } - - sc.height++ + sc.height = height + 1 + delete(sc.pendingBlocks, height) + delete(sc.pendingTime, height) delete(sc.receivedBlocks, height) delete(sc.blockStates, height) sc.addNewBlocks() - return nil } @@ -517,12 +528,13 @@ func (peers PeerByID) Swap(i, j int) { func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { err := sc.touchPeer(event.peerID, event.time) if err != nil { - return scPeerError{peerID: event.peerID, reason: err}, nil + // peer does not exist OR not ready + return noOp, nil } err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time) if err != nil { - _ = sc.removePeer(event.peerID) + sc.removePeer(event.peerID) return scPeerError{peerID: event.peerID, reason: err}, nil } @@ -530,16 +542,14 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { } func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) { - if len(sc.peers) == 0 { - return noOp, nil - } - + // No such peer or peer was removed. peer, ok := sc.peers[event.peerID] if !ok || peer.state == peerStateRemoved { return noOp, nil } + // The peer may have been just removed due to errors, low speed or timeouts. 
- _ = sc.removePeer(event.peerID) + sc.removePeer(event.peerID) return scPeerError{peerID: event.peerID, reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d", @@ -548,13 +558,11 @@ func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, erro func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) { if event.height != sc.height { - panic(fmt.Sprintf("processed height %d but expected height %d", event.height, sc.height)) + panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height)) } + err := sc.markProcessed(event.height) if err != nil { - // It is possible that a peer error or timeout is handled after the processor - // has processed the block but before the scheduler received this event, - // so when pcBlockProcessed event is received the block had been requested again. return scSchedulerFail{reason: err}, nil } @@ -568,13 +576,10 @@ func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) // Handles an error from the processor. The processor had already cleaned the blocks from // the peers included in this event. Just attempt to remove the peers. func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) { - if len(sc.peers) == 0 { - return noOp, nil - } // The peers may have been just removed due to errors, low speed or timeouts. 
- _ = sc.removePeer(event.firstPeerID) + sc.removePeer(event.firstPeerID) if event.firstPeerID != event.secondPeerID { - _ = sc.removePeer(event.secondPeerID) + sc.removePeer(event.secondPeerID) } if sc.allBlocksProcessed() { @@ -590,20 +595,18 @@ func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) { } func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) { - err := sc.removePeer(event.peerID) - if err != nil { - // XXX - It is possible that the removePeer fails here for legitimate reasons - // for example if a peer timeout or error was handled just before this. - return scSchedulerFail{reason: err}, nil - } + sc.removePeer(event.peerID) + if sc.allBlocksProcessed() { return scFinishedEv{reason: "removed peer"}, nil } - return noOp, nil + + // Return scPeerError so the peer (and all associated blocks) is removed from + // the processor. + return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil } func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { - // Check behavior of peer responsible to deliver block at sc.height. timeHeightAsked, ok := sc.pendingTime[sc.height] if ok && time.Since(timeHeightAsked) > sc.peerTimeout { @@ -611,9 +614,7 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { // from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer // will be timed out even if it sends blocks at higher heights but prevents progress by // not sending the block at current height. 
- if err := sc.removePeer(sc.pendingBlocks[sc.height]); err != nil { - return nil, err - } + sc.removePeer(sc.pendingBlocks[sc.height]) } prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time) @@ -621,11 +622,7 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { return noOp, nil } for _, peerID := range prunablePeers { - err := sc.removePeer(peerID) - if err != nil { - // Should never happen as prunablePeers() returns only existing peers in Ready state. - panic("scheduler data corruption") - } + sc.removePeer(peerID) } // If all blocks are processed we should finish. @@ -634,7 +631,6 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { } return scPeersPruned{peers: prunablePeers}, nil - } func (sc *scheduler) handleResetState(event bcResetState) (Event, error) { diff --git a/blockchain/v2/scheduler_test.go b/blockchain/v2/scheduler_test.go index 4d7b89090b..bf4b320f56 100644 --- a/blockchain/v2/scheduler_test.go +++ b/blockchain/v2/scheduler_test.go @@ -418,7 +418,6 @@ func TestScRemovePeer(t *testing.T) { "P1": {height: 10, state: peerStateRemoved}, "P2": {height: 11, state: peerStateReady}}, allB: []int64{8, 9, 10, 11}}, - wantErr: true, }, { name: "remove Ready peer with blocks requested", @@ -492,9 +491,7 @@ func TestScRemovePeer(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { sc := newTestScheduler(tt.fields) - if err := sc.removePeer(tt.args.peerID); (err != nil) != tt.wantErr { - t.Errorf("removePeer() wantErr %v, error = %v", tt.wantErr, err) - } + sc.removePeer(tt.args.peerID) wantSc := newTestScheduler(tt.wantFields) assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) }) @@ -534,7 +531,6 @@ func TestScSetPeerRange(t *testing.T) { peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, args: args{peerID: "P1", height: 4}, wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - 
wantErr: true, }, { name: "decrease height of single peer", @@ -586,8 +582,7 @@ func TestScSetPeerRange(t *testing.T) { allB: []int64{1, 2, 3, 4}}, args: args{peerID: "P1", base: 6, height: 5}, wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, wantErr: true, }, { @@ -993,19 +988,20 @@ func TestScMarkProcessed(t *testing.T) { { name: "processed an unreceived block", fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - received: map[int64]p2p.ID{1: "P1"}}, + height: 2, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{2}, + pending: map[int64]p2p.ID{2: "P1"}, + pendingTime: map[int64]time.Time{2: now}, + targetPending: 1, + }, args: args{height: 2}, wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - received: map[int64]p2p.ID{1: "P1"}}, - wantErr: true, + height: 3, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{3}, + targetPending: 1, + }, }, { name: "mark processed success", @@ -1416,13 +1412,13 @@ func TestScHandleBlockResponse(t *testing.T) { name: "empty scheduler", fields: scTestParams{}, args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "block from removed peer", fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "block we haven't asked for", @@ -1441,7 +1437,7 @@ 
func TestScHandleBlockResponse(t *testing.T) { pendingTime: map[int64]time.Time{6: now}, }, args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "block with bad timestamp", @@ -1575,7 +1571,7 @@ func TestScHandleBlockProcessed(t *testing.T) { name: "empty scheduler", fields: scTestParams{height: 6}, args: args{event: processed6FromP1}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "processed block we don't have", @@ -1587,7 +1583,7 @@ func TestScHandleBlockProcessed(t *testing.T) { pendingTime: map[int64]time.Time{6: now}, }, args: args{event: processed6FromP1}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "processed block ok, we processed all blocks", @@ -2001,7 +1997,7 @@ func TestScHandleStatusResponse(t *testing.T) { name: "increase height of removed peer", fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, args: args{event: statusRespP1Ev}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 0000000000..dc56781dd4 --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,13 @@ +# The version of the generation template. +# Required. +# The only currently-valid value is v1beta1. +version: v1beta1 + +# The plugins to run. +plugins: + # The name of the plugin. + - name: gogofaster + # The the relative output directory. + out: proto + # Any options to provide to the plugin. 
+ opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative diff --git a/buf.yaml b/buf.yaml index d21611209c..cc4aced576 100644 --- a/buf.yaml +++ b/buf.yaml @@ -1,3 +1,5 @@ +version: v1beta1 + build: roots: - proto diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go index 053ac2ff7d..d5d6bede75 100644 --- a/cmd/tendermint/commands/gen_node_key.go +++ b/cmd/tendermint/commands/gen_node_key.go @@ -5,28 +5,30 @@ import ( "github.com/spf13/cobra" - tmos "github.com/lazyledger/lazyledger-core/libs/os" + tmjson "github.com/lazyledger/lazyledger-core/libs/json" "github.com/lazyledger/lazyledger-core/p2p" ) -// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to -// the standard output. +// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded +// NodeKey to the standard output. 
var GenNodeKeyCmd = &cobra.Command{ - Use: "gen_node_key", - Short: "Generate a node key for this node and print its ID", - RunE: genNodeKey, + Use: "gen-node-key", + Aliases: []string{"gen_node_key"}, + Short: "Generate a new node key", + RunE: genNodeKey, + PreRun: deprecateSnakeCase, } func genNodeKey(cmd *cobra.Command, args []string) error { - nodeKeyFile := config.NodeKeyFile() - if tmos.FileExists(nodeKeyFile) { - return fmt.Errorf("node key at %s already exists", nodeKeyFile) - } + nodeKey := p2p.GenNodeKey() - nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile) + bz, err := tmjson.Marshal(nodeKey) if err != nil { - return err + return fmt.Errorf("nodeKey -> json: %w", err) } - fmt.Println(nodeKey.ID()) + + fmt.Printf(`%v +`, string(bz)) + return nil } diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go index 384d17abf7..a4d07d0e42 100644 --- a/cmd/tendermint/commands/gen_validator.go +++ b/cmd/tendermint/commands/gen_validator.go @@ -7,22 +7,37 @@ import ( tmjson "github.com/lazyledger/lazyledger-core/libs/json" "github.com/lazyledger/lazyledger-core/privval" + "github.com/lazyledger/lazyledger-core/types" ) // GenValidatorCmd allows the generation of a keypair for a // validator. var GenValidatorCmd = &cobra.Command{ - Use: "gen_validator", - Short: "Generate new validator keypair", - Run: genValidator, + Use: "gen-validator", + Aliases: []string{"gen_validator"}, + Short: "Generate new validator keypair", + RunE: genValidator, + PreRun: deprecateSnakeCase, } -func genValidator(cmd *cobra.Command, args []string) { - pv := privval.GenFilePV("", "") +func init() { + GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. 
Options: ed25519, secp256k1") +} + +func genValidator(cmd *cobra.Command, args []string) error { + pv, err := privval.GenFilePV("", "", keyType) + if err != nil { + return err + } + jsbz, err := tmjson.Marshal(pv) if err != nil { - panic(err) + return fmt.Errorf("validator -> json: %w", err) } + fmt.Printf(`%v `, string(jsbz)) + + return nil } diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index 9c9f646034..c8b766190b 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -10,6 +10,7 @@ import ( tmrand "github.com/lazyledger/lazyledger-core/libs/rand" "github.com/lazyledger/lazyledger-core/p2p" "github.com/lazyledger/lazyledger-core/privval" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" "github.com/lazyledger/lazyledger-core/types" tmtime "github.com/lazyledger/lazyledger-core/types/time" ) @@ -21,6 +22,15 @@ var InitFilesCmd = &cobra.Command{ RunE: initFiles, } +var ( + keyType string +) + +func init() { + InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. 
Options: ed25519, secp256k1") +} + func initFiles(cmd *cobra.Command, args []string) error { return initFilesWithConfig(config) } @@ -29,13 +39,19 @@ func initFilesWithConfig(config *cfg.Config) error { // private validator privValKeyFile := config.PrivValidatorKeyFile() privValStateFile := config.PrivValidatorStateFile() - var pv *privval.FilePV + var ( + pv *privval.FilePV + err error + ) if tmos.FileExists(privValKeyFile) { pv = privval.LoadFilePV(privValKeyFile, privValStateFile) logger.Info("Found private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) } else { - pv = privval.GenFilePV(privValKeyFile, privValStateFile) + pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType) + if err != nil { + return err + } pv.Save() logger.Info("Generated private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) @@ -56,11 +72,17 @@ func initFilesWithConfig(config *cfg.Config) error { if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) } else { + genDoc := types.GenesisDoc{ ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)), GenesisTime: tmtime.Now(), ConsensusParams: types.DefaultConsensusParams(), } + if keyType == "secp256k1" { + genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{ + PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + } + } pubKey, err := pv.GetPubKey() if err != nil { return fmt.Errorf("can't get pubkey: %w", err) diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index 95d771c295..dab520661b 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -61,7 +61,7 @@ var ( primaryAddr string witnessAddrsJoined string chainID string - home string + dir string maxOpenConnections int sequential bool @@ -83,8 +83,8 @@ func init() { "connect to a Tendermint node at this address") LightCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", "tendermint nodes to cross-check the primary node, 
comma-separated") - LightCmd.Flags().StringVar(&home, "home-dir", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")), - "specify the home directory") + LightCmd.Flags().StringVarP(&dir, "dir", "d", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")), + "specify the directory") LightCmd.Flags().IntVar( &maxOpenConnections, "max-open-connections", @@ -122,7 +122,7 @@ func runProxy(cmd *cobra.Command, args []string) error { witnessesAddrs = strings.Split(witnessAddrsJoined, ",") } - db, err := dbm.NewGoLevelDB("light-client-db", home) + db, err := dbm.NewGoLevelDB("light-client-db", dir) if err != nil { return fmt.Errorf("can't create a db: %w", err) } diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go index 77b3f89568..fe14b840f3 100644 --- a/cmd/tendermint/commands/probe_upnp.go +++ b/cmd/tendermint/commands/probe_upnp.go @@ -11,9 +11,11 @@ import ( // ProbeUpnpCmd adds capabilities to test the UPnP functionality. var ProbeUpnpCmd = &cobra.Command{ - Use: "probe_upnp", - Short: "Test UPnP functionality", - RunE: probeUpnp, + Use: "probe-upnp", + Aliases: []string{"probe_upnp"}, + Short: "Test UPnP functionality", + RunE: probeUpnp, + PreRun: deprecateSnakeCase, } func probeUpnp(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index 84f51f04af..601c18654a 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -18,9 +18,11 @@ var ReplayCmd = &cobra.Command{ // ReplayConsoleCmd allows replaying of messages from the WAL in a // console. 
var ReplayConsoleCmd = &cobra.Command{ - Use: "replay_console", - Short: "Replay messages from WAL in a console", + Use: "replay-console", + Aliases: []string{"replay_console"}, + Short: "Replay messages from WAL in a console", Run: func(cmd *cobra.Command, args []string) { consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) }, + PreRun: deprecateSnakeCase, } diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 0de8682744..3ce4e1ba65 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -8,27 +8,34 @@ import ( "github.com/lazyledger/lazyledger-core/libs/log" tmos "github.com/lazyledger/lazyledger-core/libs/os" "github.com/lazyledger/lazyledger-core/privval" + "github.com/lazyledger/lazyledger-core/types" ) // ResetAllCmd removes the database of this Tendermint core // instance. var ResetAllCmd = &cobra.Command{ - Use: "unsafe_reset_all", - Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", - Run: resetAll, + Use: "unsafe-reset-all", + Aliases: []string{"unsafe_reset_all"}, + Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", + Run: resetAll, + PreRun: deprecateSnakeCase, } var keepAddrBook bool func init() { ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact") + ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. Options: ed25519, secp256k1") } // ResetPrivValidatorCmd resets the private validator files. 
var ResetPrivValidatorCmd = &cobra.Command{ - Use: "unsafe_reset_priv_validator", - Short: "(unsafe) Reset this node's validator to genesis state", - Run: resetPrivValidator, + Use: "unsafe-reset-priv-validator", + Aliases: []string{"unsafe_reset_priv_validator"}, + Short: "(unsafe) Reset this node's validator to genesis state", + Run: resetPrivValidator, + PreRun: deprecateSnakeCase, } // XXX: this is totally unsafe. @@ -71,7 +78,10 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) { logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile, "stateFile", privValStateFile) } else { - pv := privval.GenFilePV(privValKeyFile, privValStateFile) + pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType) + if err != nil { + panic(err) + } pv.Save() logger.Info("Generated private validator file", "keyFile", privValKeyFile, "stateFile", privValStateFile) diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index f45f6586ff..5fa6d6cc0a 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -3,6 +3,7 @@ package commands import ( "fmt" "os" + "strings" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -23,7 +24,7 @@ func init() { } func registerFlagsRootCmd(cmd *cobra.Command) { - cmd.PersistentFlags().String("log_level", config.LogLevel, "log level") + cmd.PersistentFlags().String("log-level", config.LogLevel, "log level") } // ParseConfig retrieves the default environment configuration, @@ -36,16 +37,16 @@ func ParseConfig() (*cfg.Config, error) { } conf.SetRoot(conf.RootDir) cfg.EnsureRoot(conf.RootDir) - if err = conf.ValidateBasic(); err != nil { + if err := conf.ValidateBasic(); err != nil { return nil, fmt.Errorf("error in config file: %v", err) } - return conf, err + return conf, nil } // RootCmd is the root command for Tendermint core. 
var RootCmd = &cobra.Command{ Use: "tendermint", - Short: "Tendermint Core (BFT Consensus) in Go", + Short: "BFT state machine replication for applications in any programming languages", PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { if cmd.Name() == VersionCmd.Name() { return nil @@ -68,3 +69,10 @@ var RootCmd = &cobra.Command{ return nil }, } + +// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35 +func deprecateSnakeCase(cmd *cobra.Command, args []string) { + if strings.Contains(cmd.CalledAs(), "_") { + fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release") + } +} diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index 130dbec951..bc01e9ecb2 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -99,7 +99,7 @@ func TestRootFlagsEnv(t *testing.T) { logLevel string }{ {[]string{"--log", "debug"}, nil, defaultLogLvl}, // wrong flag - {[]string{"--log_level", "debug"}, nil, "debug"}, // right flag + {[]string{"--log-level", "debug"}, nil, "debug"}, // right flag {nil, map[string]string{"TM_LOW": "debug"}, defaultLogLvl}, // wrong env flag {nil, map[string]string{"MT_LOG_LEVEL": "debug"}, defaultLogLvl}, // wrong env prefix {nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env @@ -120,7 +120,7 @@ func TestRootConfig(t *testing.T) { // write non-default config nonDefaultLogLvl := "abc:debug" cvals := map[string]string{ - "log_level": nonDefaultLogLvl, + "log-level": nonDefaultLogLvl, } cases := []struct { @@ -130,7 +130,7 @@ func TestRootConfig(t *testing.T) { logLvl string }{ {nil, nil, nonDefaultLogLvl}, // should load config - {[]string{"--log_level=abc:info"}, nil, "abc:info"}, // flag over rides + {[]string{"--log-level=abc:info"}, nil, "abc:info"}, // flag over rides {nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides } diff 
--git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 806d5f4e2e..db03590b65 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -26,24 +26,24 @@ func AddNodeFlags(cmd *cobra.Command) { // priv val flags cmd.Flags().String( - "priv_validator_laddr", + "priv-validator-laddr", config.PrivValidatorListenAddr, - "socket address to listen on for connections from external priv_validator process") + "socket address to listen on for connections from external priv-validator process") // node flags - cmd.Flags().Bool("fast_sync", config.FastSyncMode, "fast blockchain syncing") + cmd.Flags().Bool("fast-sync", config.FastSyncMode, "fast blockchain syncing") cmd.Flags().BytesHexVar( &genesisHash, - "genesis_hash", + "genesis-hash", []byte{}, "optional SHA-256 hash of the genesis file") - cmd.Flags().Int64("consensus.double_sign_check_height", config.Consensus.DoubleSignCheckHeight, + cmd.Flags().Int64("consensus.double-sign-check-height", config.Consensus.DoubleSignCheckHeight, "how many blocks to look back to check existence of the node's "+ "consensus votes before joining consensus") // abci flags cmd.Flags().String( - "proxy_app", + "proxy-app", config.ProxyApp, "proxy app address, or one of: 'kvstore',"+ " 'persistent_kvstore',"+ @@ -54,11 +54,11 @@ func AddNodeFlags(cmd *cobra.Command) { // rpc flags cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") cmd.Flags().String( - "rpc.grpc_laddr", + "rpc.grpc-laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). 
Port required") cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods") - cmd.Flags().String("rpc.pprof_laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") + cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") // p2p flags cmd.Flags().String( @@ -66,31 +66,31 @@ func AddNodeFlags(cmd *cobra.Command) { config.P2P.ListenAddress, "node listen address. (0.0.0.0:0 means any interface, any port)") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") - cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") - cmd.Flags().String("p2p.unconditional_peer_ids", + cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") + cmd.Flags().String("p2p.unconditional-peer-ids", config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") - cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "enable/disable seed mode") - cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") + cmd.Flags().Bool("p2p.seed-mode", config.P2P.SeedMode, "enable/disable seed mode") + cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") // consensus flags cmd.Flags().Bool( - "consensus.create_empty_blocks", + "consensus.create-empty-blocks", config.Consensus.CreateEmptyBlocks, "set this to false to only produce blocks when there are txs or when the AppHash changes") cmd.Flags().String( - "consensus.create_empty_blocks_interval", + "consensus.create-empty-blocks-interval", 
config.Consensus.CreateEmptyBlocksInterval.String(), "the possible interval between empty blocks") // db flags cmd.Flags().String( - "db_backend", + "db-backend", config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") cmd.Flags().String( - "db_dir", + "db-dir", config.DBPath, "database directory") } @@ -99,8 +99,9 @@ func AddNodeFlags(cmd *cobra.Command) { // It can be used with a custom PrivValidator and in-process ABCI application. func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command { cmd := &cobra.Command{ - Use: "node", - Short: "Run the tendermint node", + Use: "start", + Aliases: []string{"node", "run"}, + Short: "Run the tendermint node", RunE: func(cmd *cobra.Command, args []string) error { if err := checkGenesisHash(config); err != nil { return err diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go index 119bc444f6..da450319aa 100644 --- a/cmd/tendermint/commands/show_node_id.go +++ b/cmd/tendermint/commands/show_node_id.go @@ -10,9 +10,11 @@ import ( // ShowNodeIDCmd dumps node's ID to the standard output. var ShowNodeIDCmd = &cobra.Command{ - Use: "show_node_id", - Short: "Show this node's ID", - RunE: showNodeID, + Use: "show-node-id", + Aliases: []string{"show_node_id"}, + Short: "Show this node's ID", + RunE: showNodeID, + PreRun: deprecateSnakeCase, } func showNodeID(cmd *cobra.Command, args []string) error { @@ -21,6 +23,6 @@ func showNodeID(cmd *cobra.Command, args []string) error { return err } - fmt.Println(nodeKey.ID()) + fmt.Println(nodeKey.ID) return nil } diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index 7ef7751e87..34870bcf2c 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -12,9 +12,11 @@ import ( // ShowValidatorCmd adds capabilities for showing the validator info. 
var ShowValidatorCmd = &cobra.Command{ - Use: "show_validator", - Short: "Show this node's validator info", - RunE: showValidator, + Use: "show-validator", + Aliases: []string{"show_validator"}, + Short: "Show this node's validator info", + RunE: showValidator, + PreRun: deprecateSnakeCase, } func showValidator(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index 2519483fc3..24b84b6703 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -15,6 +15,7 @@ import ( tmrand "github.com/lazyledger/lazyledger-core/libs/rand" "github.com/lazyledger/lazyledger-core/p2p" "github.com/lazyledger/lazyledger-core/privval" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" "github.com/lazyledger/lazyledger-core/types" tmtime "github.com/lazyledger/lazyledger-core/types/time" ) @@ -74,6 +75,8 @@ func init() { "P2P Port") TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, "randomize the moniker for each generated node") + TestnetFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. Options: ed25519, secp256k1") } // TestnetFilesCmd allows initialisation of files for a Tendermint testnet. @@ -85,7 +88,7 @@ necessary files (private validator, genesis, config, etc.). Note, strict routability for addresses is turned off in the config file. -Optionally, it will fill in persistent_peers list in config file using either hostnames or IPs. +Optionally, it will fill in persistent-peers list in config file using either hostnames or IPs. 
Example: @@ -180,10 +183,15 @@ func testnetFiles(cmd *cobra.Command, args []string) error { // Generate genesis doc from generated validators genDoc := &types.GenesisDoc{ ChainID: "chain-" + tmrand.Str(6), - ConsensusParams: types.DefaultConsensusParams(), GenesisTime: tmtime.Now(), InitialHeight: initialHeight, Validators: genVals, + ConsensusParams: types.DefaultConsensusParams(), + } + if keyType == "secp256k1" { + genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{ + PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + } } // Write genesis file. @@ -255,7 +263,7 @@ func persistentPeersString(config *cfg.Config) (string, error) { if err != nil { return "", err } - persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) + persistentPeers[i] = p2p.IDAddressString(nodeKey.ID, fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) } return strings.Join(persistentPeers, ","), nil } diff --git a/cmd/tendermint/commands/version.go b/cmd/tendermint/commands/version.go index acfdef4340..d858fc86a0 100644 --- a/cmd/tendermint/commands/version.go +++ b/cmd/tendermint/commands/version.go @@ -13,6 +13,6 @@ var VersionCmd = &cobra.Command{ Use: "version", Short: "Show version info", Run: func(cmd *cobra.Command, args []string) { - fmt.Println(version.Version) + fmt.Println(version.TMCoreSemVer) }, } diff --git a/config/config.go b/config/config.go index f7f7b62a5d..d78507cd8d 100644 --- a/config/config.go +++ b/config/config.go @@ -63,7 +63,7 @@ type Config struct { StateSync *StateSyncConfig `mapstructure:"statesync"` FastSync *FastSyncConfig `mapstructure:"fastsync"` Consensus *ConsensusConfig `mapstructure:"consensus"` - TxIndex *TxIndexConfig `mapstructure:"tx_index"` + TxIndex *TxIndexConfig `mapstructure:"tx-index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` } @@ -151,7 +151,7 @@ type BaseConfig struct { //nolint: maligned // TCP or UNIX socket address of the ABCI application, // or the name 
of an ABCI application compiled in with the Tendermint binary - ProxyApp string `mapstructure:"proxy_app"` + ProxyApp string `mapstructure:"proxy-app"` // A custom human readable name for this node Moniker string `mapstructure:"moniker"` @@ -159,7 +159,7 @@ type BaseConfig struct { //nolint: maligned // If this node is many blocks behind the tip of the chain, FastSync // allows them to catchup quickly by downloading blocks in parallel // and verifying their commits - FastSyncMode bool `mapstructure:"fast_sync"` + FastSyncMode bool `mapstructure:"fast-sync"` // Database backend: goleveldb | cleveldb | boltdb | rocksdb // * goleveldb (github.com/syndtr/goleveldb - most popular implementation) @@ -180,39 +180,39 @@ type BaseConfig struct { //nolint: maligned // * badgerdb (uses github.com/dgraph-io/badger) // - EXPERIMENTAL // - use badgerdb build tag (go build -tags badgerdb) - DBBackend string `mapstructure:"db_backend"` + DBBackend string `mapstructure:"db-backend"` // Database directory - DBPath string `mapstructure:"db_dir"` + DBPath string `mapstructure:"db-dir"` // Output level for logging - LogLevel string `mapstructure:"log_level"` + LogLevel string `mapstructure:"log-level"` // Output format: 'plain' (colored text) or 'json' - LogFormat string `mapstructure:"log_format"` + LogFormat string `mapstructure:"log-format"` // Path to the JSON file containing the initial validator set and other meta data - Genesis string `mapstructure:"genesis_file"` + Genesis string `mapstructure:"genesis-file"` // Path to the JSON file containing the private key to use as a validator in the consensus protocol - PrivValidatorKey string `mapstructure:"priv_validator_key_file"` + PrivValidatorKey string `mapstructure:"priv-validator-key-file"` // Path to the JSON file containing the last sign state of a validator - PrivValidatorState string `mapstructure:"priv_validator_state_file"` + PrivValidatorState string `mapstructure:"priv-validator-state-file"` // TCP or UNIX 
socket address for Tendermint to listen on for // connections from an external PrivValidator process - PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"` + PrivValidatorListenAddr string `mapstructure:"priv-validator-laddr"` // A JSON file containing the private key to use for p2p authenticated encryption - NodeKey string `mapstructure:"node_key_file"` + NodeKey string `mapstructure:"node-key-file"` // Mechanism to connect to the ABCI application: socket | grpc ABCI string `mapstructure:"abci"` // If true, query the ABCI app on connecting to a new peer // so the app can decide if we should keep the connection or not - FilterPeers bool `mapstructure:"filter_peers"` // false + FilterPeers bool `mapstructure:"filter-peers"` // false } // DefaultBaseConfig returns a default base configuration for a Tendermint node @@ -279,7 +279,7 @@ func (cfg BaseConfig) ValidateBasic() error { switch cfg.LogFormat { case LogFormatPlain, LogFormatJSON: default: - return errors.New("unknown log_format (must be 'plain' or 'json')") + return errors.New("unknown log format (must be 'plain' or 'json')") } return nil } @@ -309,58 +309,58 @@ type RPCConfig struct { // If the special '*' value is present in the list, all origins will be allowed. // An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com). // Only one wildcard can be used per origin. - CORSAllowedOrigins []string `mapstructure:"cors_allowed_origins"` + CORSAllowedOrigins []string `mapstructure:"cors-allowed-origins"` // A list of methods the client is allowed to use with cross-domain requests. - CORSAllowedMethods []string `mapstructure:"cors_allowed_methods"` + CORSAllowedMethods []string `mapstructure:"cors-allowed-methods"` // A list of non simple headers the client is allowed to use with cross-domain requests. 
- CORSAllowedHeaders []string `mapstructure:"cors_allowed_headers"` + CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"` // TCP or UNIX socket address for the gRPC server to listen on // NOTE: This server only supports /broadcast_tx_commit - GRPCListenAddress string `mapstructure:"grpc_laddr"` + GRPCListenAddress string `mapstructure:"grpc-laddr"` // Maximum number of simultaneous connections. - // Does not include RPC (HTTP&WebSocket) connections. See max_open_connections + // Does not include RPC (HTTP&WebSocket) connections. See max-open-connections // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. - GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"` + GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"` - // Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool + // Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool Unsafe bool `mapstructure:"unsafe"` // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc_max_open_connections + // Does not include gRPC connections. See grpc-max-open-connections // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} // 1024 - 40 - 10 - 50 = 924 = ~900 - MaxOpenConnections int `mapstructure:"max_open_connections"` + MaxOpenConnections int `mapstructure:"max-open-connections"` // Maximum number of unique clientIDs that can /subscribe // If you're using /broadcast_tx_commit, set to the estimated maximum number // of broadcast_tx_commit calls per block. 
- MaxSubscriptionClients int `mapstructure:"max_subscription_clients"` + MaxSubscriptionClients int `mapstructure:"max-subscription-clients"` // Maximum number of unique queries a given client can /subscribe to // If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set // to the estimated maximum number of broadcast_tx_commit calls per block. - MaxSubscriptionsPerClient int `mapstructure:"max_subscriptions_per_client"` + MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"` // How long to wait for a tx to be committed during /broadcast_tx_commit // WARNING: Using a value larger than 10s will result in increasing the // global HTTP write timeout, which applies to all connections and endpoints. // See https://github.com/tendermint/tendermint/issues/3435 - TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"` + TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout-broadcast-tx-commit"` // Maximum size of request body, in bytes - MaxBodyBytes int64 `mapstructure:"max_body_bytes"` + MaxBodyBytes int64 `mapstructure:"max-body-bytes"` // Maximum size of request header, in bytes - MaxHeaderBytes int `mapstructure:"max_header_bytes"` + MaxHeaderBytes int `mapstructure:"max-header-bytes"` // The path to a file containing certificate that is used to create the HTTPS server. // Migth be either absolute path or path related to tendermint's config directory. @@ -369,19 +369,19 @@ type RPCConfig struct { // the certFile should be the concatenation of the server's certificate, any intermediates, // and the CA's certificate. // - // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. + // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. // Otherwise, HTTP server is run. 
- TLSCertFile string `mapstructure:"tls_cert_file"` + TLSCertFile string `mapstructure:"tls-cert-file"` // The path to a file containing matching private key that is used to create the HTTPS server. // Migth be either absolute path or path related to tendermint's config directory. // - // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. + // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. // Otherwise, HTTP server is run. - TLSKeyFile string `mapstructure:"tls_key_file"` + TLSKeyFile string `mapstructure:"tls-key-file"` // pprof listen address (https://golang.org/pkg/net/http/pprof) - PprofListenAddress string `mapstructure:"pprof_laddr"` + PprofListenAddress string `mapstructure:"pprof-laddr"` } // DefaultRPCConfig returns a default configuration for the RPC server @@ -422,25 +422,25 @@ func TestRPCConfig() *RPCConfig { // returns an error if any check fails. func (cfg *RPCConfig) ValidateBasic() error { if cfg.GRPCMaxOpenConnections < 0 { - return errors.New("grpc_max_open_connections can't be negative") + return errors.New("grpc-max-open-connections can't be negative") } if cfg.MaxOpenConnections < 0 { - return errors.New("max_open_connections can't be negative") + return errors.New("max-open-connections can't be negative") } if cfg.MaxSubscriptionClients < 0 { - return errors.New("max_subscription_clients can't be negative") + return errors.New("max-subscription-clients can't be negative") } if cfg.MaxSubscriptionsPerClient < 0 { - return errors.New("max_subscriptions_per_client can't be negative") + return errors.New("max-subscriptions-per-client can't be negative") } if cfg.TimeoutBroadcastTxCommit < 0 { - return errors.New("timeout_broadcast_tx_commit can't be negative") + return errors.New("timeout-broadcast-tx-commit can't be negative") } if cfg.MaxBodyBytes < 0 { - return errors.New("max_body_bytes can't be negative") + return errors.New("max-body-bytes can't 
be negative") } if cfg.MaxHeaderBytes < 0 { - return errors.New("max_header_bytes can't be negative") + return errors.New("max-header-bytes can't be negative") } return nil } @@ -481,48 +481,48 @@ type P2PConfig struct { //nolint: maligned ListenAddress string `mapstructure:"laddr"` // Address to advertise to peers for them to dial - ExternalAddress string `mapstructure:"external_address"` + ExternalAddress string `mapstructure:"external-address"` // Comma separated list of seed nodes to connect to // We only use these if we can’t connect to peers in the addrbook Seeds string `mapstructure:"seeds"` // Comma separated list of nodes to keep persistent connections to - PersistentPeers string `mapstructure:"persistent_peers"` + PersistentPeers string `mapstructure:"persistent-peers"` // UPNP port forwarding UPNP bool `mapstructure:"upnp"` // Path to address book - AddrBook string `mapstructure:"addr_book_file"` + AddrBook string `mapstructure:"addr-book-file"` // Set true for strict address routability rules // Set false for private or local networks - AddrBookStrict bool `mapstructure:"addr_book_strict"` + AddrBookStrict bool `mapstructure:"addr-book-strict"` // Maximum number of inbound peers - MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"` + MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"` // Maximum number of outbound peers to connect to, excluding persistent peers - MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"` + MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"` // List of node IDs, to which a connection will be (re)established ignoring any existing limits - UnconditionalPeerIDs string `mapstructure:"unconditional_peer_ids"` + UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"` // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent_peers_max_dial_period"` + PersistentPeersMaxDialPeriod 
time.Duration `mapstructure:"persistent-peers-max-dial-period"` // Time to wait before flushing messages out on the connection - FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"` + FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` // Maximum size of a message packet payload, in bytes - MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` + MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"` // Rate at which packets can be sent, in bytes/second - SendRate int64 `mapstructure:"send_rate"` + SendRate int64 `mapstructure:"send-rate"` // Rate at which packets can be received, in bytes/second - RecvRate int64 `mapstructure:"recv_rate"` + RecvRate int64 `mapstructure:"recv-rate"` // Set true to enable the peer-exchange reactor PexReactor bool `mapstructure:"pex"` @@ -531,25 +531,22 @@ type P2PConfig struct { //nolint: maligned // peers. If another node asks it for addresses, it responds and disconnects. // // Does not work if the peer-exchange reactor is disabled. - SeedMode bool `mapstructure:"seed_mode"` + SeedMode bool `mapstructure:"seed-mode"` // Comma separated list of peer IDs to keep private (will not be gossiped to // other peers) - PrivatePeerIDs string `mapstructure:"private_peer_ids"` + PrivatePeerIDs string `mapstructure:"private-peer-ids"` // Toggle to disable guard against peers connecting from the same ip. - AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"` + AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` // Peer connection configuration. - HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"` - DialTimeout time.Duration `mapstructure:"dial_timeout"` + HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` + DialTimeout time.Duration `mapstructure:"dial-timeout"` // Testing params. 
// Force dial to fail - TestDialFail bool `mapstructure:"test_dial_fail"` - // FUzz connection - TestFuzz bool `mapstructure:"test_fuzz"` - TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"` + TestDialFail bool `mapstructure:"test-dial-fail"` } // DefaultP2PConfig returns a default configuration for the peer-to-peer layer @@ -573,8 +570,6 @@ func DefaultP2PConfig() *P2PConfig { HandshakeTimeout: 20 * time.Second, DialTimeout: 3 * time.Second, TestDialFail: false, - TestFuzz: false, - TestFuzzConfig: DefaultFuzzConnConfig(), } } @@ -596,49 +591,29 @@ func (cfg *P2PConfig) AddrBookFile() string { // returns an error if any check fails. func (cfg *P2PConfig) ValidateBasic() error { if cfg.MaxNumInboundPeers < 0 { - return errors.New("max_num_inbound_peers can't be negative") + return errors.New("max-num-inbound-peers can't be negative") } if cfg.MaxNumOutboundPeers < 0 { - return errors.New("max_num_outbound_peers can't be negative") + return errors.New("max-num-outbound-peers can't be negative") } if cfg.FlushThrottleTimeout < 0 { - return errors.New("flush_throttle_timeout can't be negative") + return errors.New("flush-throttle-timeout can't be negative") } if cfg.PersistentPeersMaxDialPeriod < 0 { - return errors.New("persistent_peers_max_dial_period can't be negative") + return errors.New("persistent-peers-max-dial-period can't be negative") } if cfg.MaxPacketMsgPayloadSize < 0 { - return errors.New("max_packet_msg_payload_size can't be negative") + return errors.New("max-packet-msg-payload-size can't be negative") } if cfg.SendRate < 0 { - return errors.New("send_rate can't be negative") + return errors.New("send-rate can't be negative") } if cfg.RecvRate < 0 { - return errors.New("recv_rate can't be negative") + return errors.New("recv-rate can't be negative") } return nil } -// FuzzConnConfig is a FuzzedConnection configuration. 
-type FuzzConnConfig struct { - Mode int - MaxDelay time.Duration - ProbDropRW float64 - ProbDropConn float64 - ProbSleep float64 -} - -// DefaultFuzzConnConfig returns the default config. -func DefaultFuzzConnConfig() *FuzzConnConfig { - return &FuzzConnConfig{ - Mode: FuzzModeDrop, - MaxDelay: 3 * time.Second, - ProbDropRW: 0.2, - ProbDropConn: 0.00, - ProbSleep: 0.00, - } -} - //----------------------------------------------------------------------------- // MempoolConfig @@ -647,21 +622,21 @@ type MempoolConfig struct { RootDir string `mapstructure:"home"` Recheck bool `mapstructure:"recheck"` Broadcast bool `mapstructure:"broadcast"` - WalPath string `mapstructure:"wal_dir"` + WalPath string `mapstructure:"wal-dir"` // Maximum number of transactions in the mempool Size int `mapstructure:"size"` // Limit the total size of all txs in the mempool. // This only accounts for raw transactions (e.g. given 1MB transactions and - // max_txs_bytes=5MB, mempool will only accept 5 transactions). - MaxTxsBytes int64 `mapstructure:"max_txs_bytes"` + // max-txs-bytes=5MB, mempool will only accept 5 transactions). + MaxTxsBytes int64 `mapstructure:"max-txs-bytes"` // Size of the cache (used to filter transactions we saw earlier) in transactions - CacheSize int `mapstructure:"cache_size"` + CacheSize int `mapstructure:"cache-size"` // Maximum size of a single transaction - // NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. - MaxTxBytes int `mapstructure:"max_tx_bytes"` + // NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}. + MaxTxBytes int `mapstructure:"max-tx-bytes"` // Maximum size of a batch of transactions to send to a peer // Including space needed by encoding (one varint per transaction). 
- MaxBatchBytes int `mapstructure:"max_batch_bytes"` + MaxBatchBytes int `mapstructure:"max-batch-bytes"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool @@ -704,19 +679,19 @@ func (cfg *MempoolConfig) ValidateBasic() error { return errors.New("size can't be negative") } if cfg.MaxTxsBytes < 0 { - return errors.New("max_txs_bytes can't be negative") + return errors.New("max-txs-bytes can't be negative") } if cfg.CacheSize < 0 { - return errors.New("cache_size can't be negative") + return errors.New("cache-size can't be negative") } if cfg.MaxTxBytes < 0 { - return errors.New("max_tx_bytes can't be negative") + return errors.New("max-tx-bytes can't be negative") } if cfg.MaxBatchBytes < 0 { - return errors.New("max_batch_bytes can't be negative") + return errors.New("max-batch-bytes can't be negative") } if cfg.MaxBatchBytes <= cfg.MaxTxBytes { - return errors.New("max_batch_bytes can't be less or equal to max_tx_bytes") + return errors.New("max-batch-bytes can't be less or equal to max-tx-bytes") } return nil } @@ -727,12 +702,12 @@ func (cfg *MempoolConfig) ValidateBasic() error { // StateSyncConfig defines the configuration for the Tendermint state sync service type StateSyncConfig struct { Enable bool `mapstructure:"enable"` - TempDir string `mapstructure:"temp_dir"` - RPCServers []string `mapstructure:"rpc_servers"` - TrustPeriod time.Duration `mapstructure:"trust_period"` - TrustHeight int64 `mapstructure:"trust_height"` - TrustHash string `mapstructure:"trust_hash"` - DiscoveryTime time.Duration `mapstructure:"discovery_time"` + TempDir string `mapstructure:"temp-dir"` + RPCServers []string `mapstructure:"rpc-servers"` + TrustPeriod time.Duration `mapstructure:"trust-period"` + TrustHeight int64 `mapstructure:"trust-height"` + TrustHash string `mapstructure:"trust-hash"` + DiscoveryTime time.Duration `mapstructure:"discovery-time"` } func (cfg *StateSyncConfig) TrustHashBytes() []byte { @@ -761,28 +736,28 @@ func 
TestStateSyncConfig() *StateSyncConfig { func (cfg *StateSyncConfig) ValidateBasic() error { if cfg.Enable { if len(cfg.RPCServers) == 0 { - return errors.New("rpc_servers is required") + return errors.New("rpc-servers is required") } if len(cfg.RPCServers) < 2 { - return errors.New("at least two rpc_servers entries is required") + return errors.New("at least two rpc-servers entries is required") } for _, server := range cfg.RPCServers { if len(server) == 0 { - return errors.New("found empty rpc_servers entry") + return errors.New("found empty rpc-servers entry") } } if cfg.TrustPeriod <= 0 { - return errors.New("trusted_period is required") + return errors.New("trusted-period is required") } if cfg.TrustHeight <= 0 { - return errors.New("trusted_height is required") + return errors.New("trusted-height is required") } if len(cfg.TrustHash) == 0 { - return errors.New("trusted_hash is required") + return errors.New("trusted-hash is required") } _, err := hex.DecodeString(cfg.TrustHash) if err != nil { - return fmt.Errorf("invalid trusted_hash: %w", err) + return fmt.Errorf("invalid trusted-hash: %w", err) } } return nil @@ -827,39 +802,39 @@ func (cfg *FastSyncConfig) ValidateBasic() error { // including timeouts and details about the WAL and the block structure. type ConsensusConfig struct { RootDir string `mapstructure:"home"` - WalPath string `mapstructure:"wal_file"` + WalPath string `mapstructure:"wal-file"` walFile string // overrides WalPath if set // How long we wait for a proposal block before prevoting nil - TimeoutPropose time.Duration `mapstructure:"timeout_propose"` - // How much timeout_propose increases with each round - TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"` + TimeoutPropose time.Duration `mapstructure:"timeout-propose"` + // How much timeout-propose increases with each round + TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"` // How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) - TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"` - // How much the timeout_prevote increases with each round - TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"` + TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"` + // How much the timeout-prevote increases with each round + TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"` // How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) - TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` - // How much the timeout_precommit increases with each round - TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"` + TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"` + // How much the timeout-precommit increases with each round + TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"` // How long we wait after committing a block, before starting on the new // height (this gives us a chance to receive some more precommits, even // though we already have +2/3). 
- // NOTE: when modifying, make sure to update time_iota_ms genesis parameter - TimeoutCommit time.Duration `mapstructure:"timeout_commit"` + // NOTE: when modifying, make sure to update time-iota-ms genesis parameter + TimeoutCommit time.Duration `mapstructure:"timeout-commit"` // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) - SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` + SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"` // EmptyBlocks mode and possible interval between empty blocks - CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` - CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"` + CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"` + CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"` // Reactor sleep duration parameters - PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"` - PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"` + PeerGossipSleepDuration time.Duration `mapstructure:"peer-gossip-sleep-duration"` + PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"` - DoubleSignCheckHeight int64 `mapstructure:"double_sign_check_height"` + DoubleSignCheckHeight int64 `mapstructure:"double-sign-check-height"` } // DefaultConsensusConfig returns a default configuration for the consensus service @@ -949,37 +924,37 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) { // returns an error if any check fails. 
func (cfg *ConsensusConfig) ValidateBasic() error { if cfg.TimeoutPropose < 0 { - return errors.New("timeout_propose can't be negative") + return errors.New("timeout-propose can't be negative") } if cfg.TimeoutProposeDelta < 0 { - return errors.New("timeout_propose_delta can't be negative") + return errors.New("timeout-propose-delta can't be negative") } if cfg.TimeoutPrevote < 0 { - return errors.New("timeout_prevote can't be negative") + return errors.New("timeout-prevote can't be negative") } if cfg.TimeoutPrevoteDelta < 0 { - return errors.New("timeout_prevote_delta can't be negative") + return errors.New("timeout-prevote-delta can't be negative") } if cfg.TimeoutPrecommit < 0 { - return errors.New("timeout_precommit can't be negative") + return errors.New("timeout-precommit can't be negative") } if cfg.TimeoutPrecommitDelta < 0 { - return errors.New("timeout_precommit_delta can't be negative") + return errors.New("timeout-precommit-delta can't be negative") } if cfg.TimeoutCommit < 0 { - return errors.New("timeout_commit can't be negative") + return errors.New("timeout-commit can't be negative") } if cfg.CreateEmptyBlocksInterval < 0 { - return errors.New("create_empty_blocks_interval can't be negative") + return errors.New("create-empty-blocks-interval can't be negative") } if cfg.PeerGossipSleepDuration < 0 { - return errors.New("peer_gossip_sleep_duration can't be negative") + return errors.New("peer-gossip-sleep-duration can't be negative") } if cfg.PeerQueryMaj23SleepDuration < 0 { - return errors.New("peer_query_maj23_sleep_duration can't be negative") + return errors.New("peer-query-maj23-sleep-duration can't be negative") } if cfg.DoubleSignCheckHeight < 0 { - return errors.New("double_sign_check_height can't be negative") + return errors.New("double-sign-check-height can't be negative") } return nil } @@ -1028,13 +1003,13 @@ type InstrumentationConfig struct { Prometheus bool `mapstructure:"prometheus"` // Address to listen for Prometheus collector(s) 
connections. - PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"` + PrometheusListenAddr string `mapstructure:"prometheus-listen-addr"` // Maximum number of simultaneous connections. // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. - MaxOpenConnections int `mapstructure:"max_open_connections"` + MaxOpenConnections int `mapstructure:"max-open-connections"` // Instrumentation namespace. Namespace string `mapstructure:"namespace"` @@ -1061,7 +1036,7 @@ func TestInstrumentationConfig() *InstrumentationConfig { // returns an error if any check fails. func (cfg *InstrumentationConfig) ValidateBasic() error { if cfg.MaxOpenConnections < 0 { - return errors.New("max_open_connections can't be negative") + return errors.New("max-open-connections can't be negative") } return nil } diff --git a/config/toml.go b/config/toml.go index 3daaf95689..3ed9764061 100644 --- a/config/toml.go +++ b/config/toml.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io/ioutil" + "os" "path/filepath" "strings" "text/template" @@ -63,7 +64,7 @@ func WriteConfigFile(configFilePath string, config *Config) { panic(err) } - tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0644) + mustWriteFile(configFilePath, buffer.Bytes(), 0644) } // Note: any changes to the comments/variables/mapstructure @@ -82,7 +83,7 @@ const defaultConfigTemplate = `# This is a TOML config file. 
# TCP or UNIX socket address of the ABCI application, # or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "{{ .BaseConfig.ProxyApp }}" +proxy-app = "{{ .BaseConfig.ProxyApp }}" # A custom human readable name for this node moniker = "{{ .BaseConfig.Moniker }}" @@ -90,7 +91,7 @@ moniker = "{{ .BaseConfig.Moniker }}" # If this node is many blocks behind the tip of the chain, FastSync # allows them to catchup quickly by downloading blocks in parallel # and verifying their commits -fast_sync = {{ .BaseConfig.FastSyncMode }} +fast-sync = {{ .BaseConfig.FastSyncMode }} # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) @@ -111,41 +112,41 @@ fast_sync = {{ .BaseConfig.FastSyncMode }} # * badgerdb (uses github.com/dgraph-io/badger) # - EXPERIMENTAL # - use badgerdb build tag (go build -tags badgerdb) -db_backend = "{{ .BaseConfig.DBBackend }}" +db-backend = "{{ .BaseConfig.DBBackend }}" # Database directory -db_dir = "{{ js .BaseConfig.DBPath }}" +db-dir = "{{ js .BaseConfig.DBPath }}" # Output level for logging, including package level options -log_level = "{{ .BaseConfig.LogLevel }}" +log-level = "{{ .BaseConfig.LogLevel }}" # Output format: 'plain' (colored text) or 'json' -log_format = "{{ .BaseConfig.LogFormat }}" +log-format = "{{ .BaseConfig.LogFormat }}" ##### additional base config options ##### # Path to the JSON file containing the initial validator set and other meta data -genesis_file = "{{ js .BaseConfig.Genesis }}" +genesis-file = "{{ js .BaseConfig.Genesis }}" # Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "{{ js .BaseConfig.PrivValidatorKey }}" +priv-validator-key-file = "{{ js .BaseConfig.PrivValidatorKey }}" # Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "{{ js 
.BaseConfig.PrivValidatorState }}" +priv-validator-state-file = "{{ js .BaseConfig.PrivValidatorState }}" # TCP or UNIX socket address for Tendermint to listen on for # connections from an external PrivValidator process -priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}" +priv-validator-laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}" # Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "{{ js .BaseConfig.NodeKey }}" +node-key-file = "{{ js .BaseConfig.NodeKey }}" # Mechanism to connect to the ABCI application: socket | grpc abci = "{{ .BaseConfig.ABCI }}" # If true, query the ABCI app on connecting to a new peer # so the app can decide if we should keep the connection or not -filter_peers = {{ .BaseConfig.FilterPeers }} +filter-peers = {{ .BaseConfig.FilterPeers }} ####################################################################### @@ -163,78 +164,78 @@ laddr = "{{ .RPC.ListenAddress }}" # A list of origins a cross-domain request can be executed from # Default value '[]' disables cors support # Use '["*"]' to allow any origin -cors_allowed_origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}] +cors-allowed-origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}] # A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}{{end}}] +cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}{{end}}] # A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}] +cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . 
}}{{end}}] # TCP or UNIX socket address for the gRPC server to listen on # NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "{{ .RPC.GRPCListenAddress }}" +grpc-laddr = "{{ .RPC.GRPCListenAddress }}" # Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. # Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} # 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }} +grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }} -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool unsafe = {{ .RPC.Unsafe }} # Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections +# Does not include gRPC connections. See grpc-max-open-connections # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. # Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} # 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = {{ .RPC.MaxOpenConnections }} +max-open-connections = {{ .RPC.MaxOpenConnections }} # Maximum number of unique clientIDs that can /subscribe # If you're using /broadcast_tx_commit, set to the estimated maximum number # of broadcast_tx_commit calls per block.
-max_subscription_clients = {{ .RPC.MaxSubscriptionClients }} +max-subscription-clients = {{ .RPC.MaxSubscriptionClients }} # Maximum number of unique queries a given client can /subscribe to # If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to # the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }} +max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }} # How long to wait for a tx to be committed during /broadcast_tx_commit. # WARNING: Using a value larger than 10s will result in increasing the # global HTTP write timeout, which applies to all connections and endpoints. # See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" +timeout-broadcast-tx-commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" # Maximum size of request body, in bytes -max_body_bytes = {{ .RPC.MaxBodyBytes }} +max-body-bytes = {{ .RPC.MaxBodyBytes }} # Maximum size of request header, in bytes -max_header_bytes = {{ .RPC.MaxHeaderBytes }} +max-header-bytes = {{ .RPC.MaxHeaderBytes }} # The path to a file containing certificate that is used to create the HTTPS server. # Migth be either absolute path or path related to tendermint's config directory. # If the certificate is signed by a certificate authority, # the certFile should be the concatenation of the server's certificate, any intermediates, # and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. # Otherwise, HTTP server is run. -tls_cert_file = "{{ .RPC.TLSCertFile }}" +tls-cert-file = "{{ .RPC.TLSCertFile }}" # The path to a file containing matching private key that is used to create the HTTPS server. 
# Migth be either absolute path or path related to tendermint's config directory. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. # Otherwise, HTTP server is run. -tls_key_file = "{{ .RPC.TLSKeyFile }}" +tls-key-file = "{{ .RPC.TLSKeyFile }}" # pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "{{ .RPC.PprofListenAddress }}" +pprof-laddr = "{{ .RPC.PprofListenAddress }}" ####################################################### ### P2P Configuration Options ### @@ -248,47 +249,47 @@ laddr = "{{ .P2P.ListenAddress }}" # If empty, will use the same port as the laddr, # and will introspect on the listener or use UPnP # to figure out the address. -external_address = "{{ .P2P.ExternalAddress }}" +external-address = "{{ .P2P.ExternalAddress }}" # Comma separated list of seed nodes to connect to seeds = "{{ .P2P.Seeds }}" # Comma separated list of nodes to keep persistent connections to -persistent_peers = "{{ .P2P.PersistentPeers }}" +persistent-peers = "{{ .P2P.PersistentPeers }}" # UPNP port forwarding upnp = {{ .P2P.UPNP }} # Path to address book -addr_book_file = "{{ js .P2P.AddrBook }}" +addr-book-file = "{{ js .P2P.AddrBook }}" # Set true for strict address routability rules # Set false for private or local networks -addr_book_strict = {{ .P2P.AddrBookStrict }} +addr-book-strict = {{ .P2P.AddrBookStrict }} # Maximum number of inbound peers -max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }} +max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }} # Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }} +max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }} # List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "{{ .P2P.UnconditionalPeerIDs }}" 
+unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}" # Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" +persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" # Time to wait before flushing messages out on the connection -flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}" +flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" # Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} +max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} # Rate at which packets can be sent, in bytes/second -send_rate = {{ .P2P.SendRate }} +send-rate = {{ .P2P.SendRate }} # Rate at which packets can be received, in bytes/second -recv_rate = {{ .P2P.RecvRate }} +recv-rate = {{ .P2P.RecvRate }} # Set true to enable the peer-exchange reactor pex = {{ .P2P.PexReactor }} @@ -297,17 +298,17 @@ pex = {{ .P2P.PexReactor }} # peers. If another node asks it for addresses, it responds and disconnects. # # Does not work if the peer-exchange reactor is disabled. -seed_mode = {{ .P2P.SeedMode }} +seed-mode = {{ .P2P.SeedMode }} # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "{{ .P2P.PrivatePeerIDs }}" +private-peer-ids = "{{ .P2P.PrivatePeerIDs }}" # Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }} +allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} # Peer connection configuration. 
-handshake_timeout = "{{ .P2P.HandshakeTimeout }}" -dial_timeout = "{{ .P2P.DialTimeout }}" +handshake-timeout = "{{ .P2P.HandshakeTimeout }}" +dial-timeout = "{{ .P2P.DialTimeout }}" ####################################################### ### Mempool Configurattion Option ### @@ -316,26 +317,26 @@ dial_timeout = "{{ .P2P.DialTimeout }}" recheck = {{ .Mempool.Recheck }} broadcast = {{ .Mempool.Broadcast }} -wal_dir = "{{ js .Mempool.WalPath }}" +wal-dir = "{{ js .Mempool.WalPath }}" # Maximum number of transactions in the mempool size = {{ .Mempool.Size }} # Limit the total size of all txs in the mempool. # This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = {{ .Mempool.MaxTxsBytes }} +# max-txs-bytes=5MB, mempool will only accept 5 transactions). +max-txs-bytes = {{ .Mempool.MaxTxsBytes }} # Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = {{ .Mempool.CacheSize }} +cache-size = {{ .Mempool.CacheSize }} # Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = {{ .Mempool.MaxTxBytes }} +# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}. +max-tx-bytes = {{ .Mempool.MaxTxBytes }} # Maximum size of a batch of transactions to send to a peer # Including space needed by encoding (one varint per transaction). -max_batch_bytes = {{ .Mempool.MaxBatchBytes }} +max-batch-bytes = {{ .Mempool.MaxBatchBytes }} ####################################################### ### State Sync Configuration Options ### @@ -352,19 +353,19 @@ enable = {{ .StateSync.Enable }} # retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding # header hash obtained from a trusted source, and a period during which validators can be trusted. 
# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2 # weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "{{ StringsJoin .StateSync.RPCServers "," }}" -trust_height = {{ .StateSync.TrustHeight }} -trust_hash = "{{ .StateSync.TrustHash }}" -trust_period = "{{ .StateSync.TrustPeriod }}" +rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}" +trust-height = {{ .StateSync.TrustHeight }} +trust-hash = "{{ .StateSync.TrustHash }}" +trust-period = "{{ .StateSync.TrustPeriod }}" # Time to spend discovering snapshots before initiating a restore. -discovery_time = "{{ .StateSync.DiscoveryTime }}" +discovery-time = "{{ .StateSync.DiscoveryTime }}" # Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). # Will create a new, randomly named directory within, and remove it when done. -temp_dir = "{{ .StateSync.TempDir }}" +temp-dir = "{{ .StateSync.TempDir }}" ####################################################### ### Fast Sync Configuration Connections ### @@ -381,46 +382,46 @@ version = "{{ .FastSync.Version }}" ####################################################### [consensus] -wal_file = "{{ js .Consensus.WalPath }}" +wal-file = "{{ js .Consensus.WalPath }}" # How long we wait for a proposal block before prevoting nil -timeout_propose = "{{ .Consensus.TimeoutPropose }}" -# How much timeout_propose increases with each round -timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}" +timeout-propose = "{{ .Consensus.TimeoutPropose }}" +# How much timeout-propose increases with each round +timeout-propose-delta = "{{ .Consensus.TimeoutProposeDelta }}" # How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) -timeout_prevote = "{{ .Consensus.TimeoutPrevote }}" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}" +timeout-prevote = "{{ .Consensus.TimeoutPrevote }}" +# How much the timeout-prevote increases with each round +timeout-prevote-delta = "{{ .Consensus.TimeoutPrevoteDelta }}" # How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}" +timeout-precommit = "{{ .Consensus.TimeoutPrecommit }}" +# How much the timeout-precommit increases with each round +timeout-precommit-delta = "{{ .Consensus.TimeoutPrecommitDelta }}" # How long we wait after committing a block, before starting on the new # height (this gives us a chance to receive some more precommits, even # though we already have +2/3). -timeout_commit = "{{ .Consensus.TimeoutCommit }}" +timeout-commit = "{{ .Consensus.TimeoutCommit }}" # How many blocks to look back to check existence of the node's consensus votes before joining consensus # When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# if the same consensus key was used to sign {double-sign-check-height} last blocks. # So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = {{ .Consensus.DoubleSignCheckHeight }} +double-sign-check-height = {{ .Consensus.DoubleSignCheckHeight }} # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }} +skip-timeout-commit = {{ .Consensus.SkipTimeoutCommit }} # EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }} -create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" +create-empty-blocks = {{ .Consensus.CreateEmptyBlocks }} +create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" # Reactor sleep duration parameters -peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" -peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" +peer-gossip-sleep-duration = "{{ .Consensus.PeerGossipSleepDuration }}" +peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" ####################################################### ### Transaction Indexer Configuration Options ### ####################################################### -[tx_index] +[tx-index] # What indexer to use for transactions # @@ -444,13 +445,13 @@ indexer = "{{ .TxIndex.Indexer }}" prometheus = {{ .Instrumentation.Prometheus }} # Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}" +prometheus-listen-addr = "{{ .Instrumentation.PrometheusListenAddr }}" # Maximum number of simultaneous connections. # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. 
-max_open_connections = {{ .Instrumentation.MaxOpenConnections }} +max-open-connections = {{ .Instrumentation.MaxOpenConnections }} # Instrumentation namespace namespace = "{{ .Instrumentation.Namespace }}" @@ -491,16 +492,22 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config { chainID = "tendermint_test" } testGenesis := fmt.Sprintf(testGenesisFmt, chainID) - tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644) + mustWriteFile(genesisFilePath, []byte(testGenesis), 0644) } // we always overwrite the priv val - tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) - tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) + mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) + mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) config := TestConfig().SetRoot(rootDir) return config } +func mustWriteFile(filePath string, contents []byte, mode os.FileMode) { + if err := ioutil.WriteFile(filePath, contents, mode); err != nil { + tmos.Exit(fmt.Sprintf("failed to write file: %v", err)) + } +} + var testGenesisFmt = `{ "genesis_time": "2018-10-10T08:20:13.695936996Z", "chain_id": "%s", diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 5d123f8db7..0268b31de8 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -108,7 +108,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { eventBuses[i] = css[i].eventBus reactors[i].SetEventBus(eventBuses[i]) - blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, 100) require.NoError(t, err) blocksSubs = append(blocksSubs, blocksSub) @@ -162,22 +162,22 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // Evidence should be submitted and committed at 
the third height but - // we will check the first five just in case + // we will check the first six just in case evidenceFromEachValidator := make([]types.Evidence, nValidators) wg := new(sync.WaitGroup) wg.Add(4) - for height := 1; height < 5; height++ { - for i := 0; i < nValidators; i++ { - go func(j int) { - msg := <-blocksSubs[j].Out() + for i := 0; i < nValidators; i++ { + go func(i int) { + for msg := range blocksSubs[i].Out() { block := msg.Data().(types.EventDataNewBlock).Block if len(block.Evidence.Evidence) != 0 { - evidenceFromEachValidator[j] = block.Evidence.Evidence[0] + evidenceFromEachValidator[i] = block.Evidence.Evidence[0] wg.Done() + return } - }(i) - } + } + }(i) } done := make(chan struct{}) @@ -186,7 +186,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { close(done) }() - pubkey, _ := bcs.privValidator.GetPubKey() + pubkey, err := bcs.privValidator.GetPubKey() + require.NoError(t, err) select { case <-done: @@ -198,11 +199,11 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { assert.Equal(t, prevoteHeight, ev.Height()) } } - case <-time.After(10 * time.Second): + case <-time.After(20 * time.Second): for i, reactor := range reactors { t.Logf("Consensus Reactor %d\n%v", i, reactor) } - t.Fatalf("Timed out waiting for all validators to commit first block") + t.Fatalf("Timed out waiting for validators to commit evidence") } } diff --git a/consensus/common_test.go b/consensus/common_test.go index be730e907c..198622cef9 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -414,7 +414,10 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV { privValidatorKeyFile := config.PrivValidatorKeyFile() ensureDir(filepath.Dir(privValidatorKeyFile), 0700) privValidatorStateFile := config.PrivValidatorStateFile() - privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + if err != nil { + 
panic(err) + } privValidator.Reset() return privValidator } @@ -739,7 +742,10 @@ func randConsensusNetWithPeers( panic(err) } - privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + if err != nil { + panic(err) + } } app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) diff --git a/consensus/invalid_test.go b/consensus/invalid_test.go index 3c2fb65cd8..1ddee58727 100644 --- a/consensus/invalid_test.go +++ b/consensus/invalid_test.go @@ -19,7 +19,7 @@ import ( func TestReactorInvalidPrecommit(t *testing.T) { N := 4 css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) - defer cleanup() + t.Cleanup(cleanup) for i := 0; i < 4; i++ { ticker := NewTimeoutTicker() @@ -43,7 +43,7 @@ func TestReactorInvalidPrecommit(t *testing.T) { invalidDoPrevoteFunc(t, height, round, byzVal, byzR.Switch, pv) } byzVal.mtx.Unlock() - defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) + t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) }) // wait for a bunch of blocks // TODO: make this tighter by ensuring the halt happens by block 2 diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 9785a71ae4..88e41ac29a 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -26,7 +26,8 @@ func assertMempool(txn txNotifier) mempl.Mempool { func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") - defer os.RemoveAll(config.RootDir) + t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) + config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(1, false, 10) cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) @@ -45,7 +46,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { func TestMempoolProgressAfterCreateEmptyBlocksInterval(t 
*testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") - defer os.RemoveAll(config.RootDir) + t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(1, false, 10) @@ -63,7 +64,8 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { func TestMempoolProgressInHigherRound(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") - defer os.RemoveAll(config.RootDir) + t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) + config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(1, false, 10) cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) diff --git a/consensus/msgs.go b/consensus/msgs.go index c6e7aa686c..686284a4c7 100644 --- a/consensus/msgs.go +++ b/consensus/msgs.go @@ -167,11 +167,14 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { case *tmcons.Message_NewValidBlock: pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) if err != nil { - return nil, fmt.Errorf("parts to proto error: %w", err) + return nil, fmt.Errorf("parts header to proto error: %w", err) } pbBits := new(bits.BitArray) - pbBits.FromProto(msg.NewValidBlock.BlockParts) + err = pbBits.FromProto(msg.NewValidBlock.BlockParts) + if err != nil { + return nil, fmt.Errorf("parts to proto error: %w", err) + } pb = &NewValidBlockMessage{ Height: msg.NewValidBlock.Height, @@ -191,7 +194,10 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { } case *tmcons.Message_ProposalPol: pbBits := new(bits.BitArray) - pbBits.FromProto(&msg.ProposalPol.ProposalPol) + err := pbBits.FromProto(&msg.ProposalPol.ProposalPol) + if err != nil { + return nil, fmt.Errorf("proposal PoL to proto error: %w", err) + } pb = &ProposalPOLMessage{ Height: msg.ProposalPol.Height, ProposalPOLRound: msg.ProposalPol.ProposalPolRound, @@ -237,10 +243,13 @@ func MsgFromProto(msg 
*tmcons.Message) (Message, error) { case *tmcons.Message_VoteSetBits: bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID) if err != nil { - return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err) + return nil, fmt.Errorf("block ID to proto error: %w", err) } bits := new(bits.BitArray) - bits.FromProto(&msg.VoteSetBits.Votes) + err = bits.FromProto(&msg.VoteSetBits.Votes) + if err != nil { + return nil, fmt.Errorf("votes to proto error: %w", err) + } pb = &VoteSetBitsMessage{ Height: msg.VoteSetBits.Height, diff --git a/consensus/reactor.go b/consensus/reactor.go index cd50c061fb..09871e7652 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -220,6 +220,8 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Peer state updates can happen in parallel, but processing of // proposals, block parts, and votes are ordered by the receiveRoutine // NOTE: blocks on consensus state for proposals, block parts, and votes +// XXX: do not call any methods that can block or incur heavy processing. 
+// https://github.com/tendermint/tendermint/issues/2888 func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { if !conR.IsRunning() { conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) @@ -228,7 +230,7 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { msg, err := decodeMsg(msgBytes) if err != nil { - conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) conR.Switch.StopPeerForError(src, err) return } diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 32c35ad15f..b3c601aaf7 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -172,9 +172,7 @@ func TestReactorWithEvidence(t *testing.T) { evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ ev}, int64(len(ev.Bytes()))) - evpool.On("Update", mock.AnythingOfType("state.State")).Return() - evpool.On("ABCIEvidence", mock.AnythingOfType("int64"), mock.AnythingOfType("[]types.Evidence")).Return( - []abci.Evidence{}) + evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() evpool2 := sm.EmptyEvidencePool{} diff --git a/consensus/replay.go b/consensus/replay.go index 9507f9b2bc..beb0d70039 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "fmt" "hash/crc32" "io" @@ -241,7 +242,7 @@ func (h *Handshaker) NBlocks() int { func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // Handshake is done via ABCI Info on the query conn. 
- res, err := proxyApp.Query().InfoSync(proxy.RequestInfo) + res, err := proxyApp.Query().InfoSync(context.Background(), proxy.RequestInfo) if err != nil { return fmt.Errorf("error calling Info: %v", err) } @@ -316,7 +317,7 @@ func (h *Handshaker) ReplayBlocks( Validators: nextVals, AppStateBytes: h.genDoc.AppState, } - res, err := proxyApp.Consensus().InitChainSync(req) + res, err := proxyApp.Consensus().InitChainSync(context.Background(), req) if err != nil { return nil, err } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index db7a04f54b..28931b99d0 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -136,10 +136,10 @@ func TestWALCrash(t *testing.T) { 3}, } - for i, tc := range testCases { + for _, tc := range testCases { tc := tc - consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i)) t.Run(tc.name, func(t *testing.T) { + consensusReplayConfig := ResetConfig(tc.name) crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) }) } @@ -627,7 +627,8 @@ func TestMockProxyApp(t *testing.T) { mock.SetResponseCallback(proxyCb) someTx := []byte("tx") - mock.DeliverTxAsync(abci.RequestDeliverTx{Tx: someTx}) + _, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx}) + assert.NoError(t, err) }) assert.True(t, validTxs == 1) assert.True(t, invalidTxs == 0) @@ -658,7 +659,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin var genesisState sm.State if testValidatorsChange { testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) - defer os.RemoveAll(testConfig.RootDir) + defer func() { _ = os.RemoveAll(testConfig.RootDir) }() stateDB = dbm.NewMemDB() genesisState = sim.GenesisState @@ -668,7 +669,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin store = newMockBlockStore(config, genesisState.ConsensusParams) } else { // test single node testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", 
t.Name(), mode)) - defer os.RemoveAll(testConfig.RootDir) + defer func() { _ = os.RemoveAll(testConfig.RootDir) }() walBody, err := WALWithNBlocks(t, numBlocks) require.NoError(t, err) walFile := tempWALWithData(walBody) @@ -750,7 +751,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin } // get the latest app hash from the app - res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""}) + res, err := proxyApp.Query().InfoSync(context.Background(), abci.RequestInfo{Version: ""}) if err != nil { t.Fatal(err) } @@ -797,7 +798,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store, state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) @@ -847,7 +848,7 @@ func buildTMStateFromChain( state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ + if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) @@ -885,7 +886,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { // - 0x02 // - 0x03 config := ResetConfig("handshake_test_") - defer os.RemoveAll(config.RootDir) + t.Cleanup(func() { os.RemoveAll(config.RootDir) }) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) const appVersion = 0x0 pubKey, err := privVal.GetPubKey() @@ -1228,7 +1229,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) { clientCreator := proxy.NewLocalClientCreator(app) config := 
ResetConfig("handshake_test_") - defer os.RemoveAll(config.RootDir) + t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) + privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) pubKey, err := privVal.GetPubKey() require.NoError(t, err) diff --git a/consensus/state.go b/consensus/state.go index 05087e9f92..9c16108c21 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -73,9 +73,9 @@ type txNotifier interface { // interface to the evidence pool type evidencePool interface { - // Adds consensus based evidence to the evidence pool where time is the time - // of the block where the offense occurred and the validator set is the current one. - AddEvidenceFromConsensus(types.Evidence, time.Time, *types.ValidatorSet) error + // Adds consensus based evidence to the evidence pool. This function differs to + // AddEvidence by bypassing verification and adding it immediately to the pool + AddEvidenceFromConsensus(types.Evidence) error } // State handles execution of the consensus algorithm. @@ -328,7 +328,7 @@ func (cs *State) OnStart() error { return err } - cs.Logger.Info("WAL file is corrupted. Attempting repair", "err", err) + cs.Logger.Error("WAL file is corrupted, attempting repair", "err", err) // 1) prep work if err := cs.wal.Stop(); err != nil { @@ -345,7 +345,7 @@ func (cs *State) OnStart() error { // 3) try to repair (WAL file will be overwritten!) 
if err := repairWalFile(corruptedFile, cs.config.WalFile()); err != nil { - cs.Logger.Error("Repair failed", "err", err) + cs.Logger.Error("WAL repair failed", "err", err) return err } cs.Logger.Info("Successful repair") @@ -1871,10 +1871,14 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { } else { timestamp = sm.MedianTime(cs.LastCommit.MakeCommit(), cs.LastValidators) } - evidenceErr := cs.evpool.AddEvidenceFromConsensus( - types.NewDuplicateVoteEvidence(voteErr.VoteA, voteErr.VoteB), timestamp, cs.Validators) + // form duplicate vote evidence from the conflicting votes and send it across to the + // evidence pool + ev := types.NewDuplicateVoteEvidence(voteErr.VoteA, voteErr.VoteB, timestamp, cs.Validators) + evidenceErr := cs.evpool.AddEvidenceFromConsensus(ev) if evidenceErr != nil { cs.Logger.Error("Failed to add evidence to the evidence pool", "err", evidenceErr) + } else { + cs.Logger.Debug("Added evidence to the evidence pool", "ev", ev) } return added, err } else if err == types.ErrVoteNonDeterministicSignature { @@ -2212,7 +2216,7 @@ func repairWalFile(src, dst string) error { } defer in.Close() - out, err := os.Open(dst) + out, err := os.Create(dst) if err != nil { return err } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index da2841d54c..7aafe21e84 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -40,7 +40,10 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. 
privValidatorKeyFile := config.PrivValidatorKeyFile() privValidatorStateFile := config.PrivValidatorStateFile() - privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + if err != nil { + return err + } genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) if err != nil { return fmt.Errorf("failed to read genesis file: %w", err) diff --git a/consensus/wal_test.go b/consensus/wal_test.go index 28dd0f8563..e2af9a748f 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -3,8 +3,6 @@ package consensus import ( "bytes" "crypto/rand" - "io/ioutil" - "os" "path/filepath" // "sync" @@ -27,10 +25,7 @@ const ( ) func TestWALTruncate(t *testing.T) { - walDir, err := ioutil.TempDir("", "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - + walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") // this magic number 4K can truncate the content when RotateFile. @@ -45,14 +40,14 @@ func TestWALTruncate(t *testing.T) { wal.SetLogger(log.TestingLogger()) err = wal.Start() require.NoError(t, err) - defer func() { + t.Cleanup(func() { if err := wal.Stop(); err != nil { t.Error(err) } // wait for the wal to finish shutting down so we // can safely remove the directory wal.Wait() - }() + }) // 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), // when headBuf is full, truncate content will Flush to the file. 
at this @@ -71,7 +66,7 @@ func TestWALTruncate(t *testing.T) { assert.NoError(t, err, "expected not to err on height %d", h) assert.True(t, found, "expected to find end height for %d", h) assert.NotNil(t, gr) - defer gr.Close() + t.Cleanup(func() { _ = gr.Close() }) dec := NewWALDecoder(gr) msg, err := dec.Decode() @@ -109,23 +104,21 @@ func TestWALEncoderDecoder(t *testing.T) { } func TestWALWrite(t *testing.T) { - walDir, err := ioutil.TempDir("", "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) + walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") wal, err := NewWAL(walFile) require.NoError(t, err) err = wal.Start() require.NoError(t, err) - defer func() { + t.Cleanup(func() { if err := wal.Stop(); err != nil { t.Error(err) } // wait for the wal to finish shutting down so we // can safely remove the directory wal.Wait() - }() + }) // 1) Write returns an error if msg is too big msg := &BlockPartMessage{ @@ -166,7 +159,7 @@ func TestWALSearchForEndHeight(t *testing.T) { assert.NoError(t, err, "expected not to err on height %d", h) assert.True(t, found, "expected to find end height for %d", h) assert.NotNil(t, gr) - defer gr.Close() + t.Cleanup(func() { _ = gr.Close() }) dec := NewWALDecoder(gr) msg, err := dec.Decode() @@ -177,12 +170,10 @@ func TestWALSearchForEndHeight(t *testing.T) { } func TestWALPeriodicSync(t *testing.T) { - walDir, err := ioutil.TempDir("", "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - + walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") wal, err := NewWAL(walFile, autofile.GroupCheckDuration(1*time.Millisecond)) + require.NoError(t, err) wal.SetFlushInterval(walTestFlushInterval) @@ -196,12 +187,12 @@ func TestWALPeriodicSync(t *testing.T) { assert.NotZero(t, wal.Group().Buffered()) require.NoError(t, wal.Start()) - defer func() { + t.Cleanup(func() { if err := wal.Stop(); err != nil { t.Error(err) } wal.Wait() - }() + }) time.Sleep(walTestFlushInterval + (10 * time.Millisecond)) @@ 
-239,7 +230,6 @@ func nBytes(n int) []byte { func benchmarkWalDecode(b *testing.B, n int) { // registerInterfacesOnce() - buf := new(bytes.Buffer) enc := NewWALEncoder(buf) diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index 763a783e31..d7d9de5f0e 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -2,11 +2,12 @@ package ed25519 import ( "bytes" + "crypto/ed25519" "crypto/subtle" "fmt" "io" - "golang.org/x/crypto/ed25519" + "github.com/hdevalence/ed25519consensus" "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/tmhash" @@ -151,7 +152,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool { return false } - return ed25519.Verify(ed25519.PublicKey(pubKey), msg, sig) + return ed25519consensus.Verify(ed25519.PublicKey(pubKey), msg, sig) } func (pubKey PubKey) String() string { diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index c1f17aa941..59012fba13 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -21,6 +21,24 @@ module.exports = { key: "59f0e2deb984aa9cdf2b3a5fd24ac501", index: "tendermint" }, + versions: [ + { + "label": "v0.32", + "key": "v0.32" + }, + { + "label": "v0.33", + "key": "v0.33" + }, + { + "label": "v0.34", + "key": "v0.34" + }, + { + "label": "master", + "key": "master" + } + ], topbar: { banner: false, }, @@ -39,17 +57,6 @@ module.exports = { path: 'https://docs.tendermint.com/master/rpc/', static: true }, - // TODO: remove once https://github.com/cosmos/vuepress-theme-cosmos/issues/91 is closed - { - title: "Version 0.32", - path: "/v0.32", - static: true - }, - { - title: "Version 0.33", - path: "/v0.33", - static: true - }, ] } ] @@ -71,7 +78,7 @@ module.exports = { }, footer: { question: { - text: "Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more." 
+ text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.' }, logo: '/logo-bw.svg', textLink: { @@ -105,7 +112,7 @@ module.exports = { } ], smallprint: - "The development of the Tendermint project is led primarily by Tendermint Inc., the for-profit entity which also maintains this website. Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit.", + 'The development of Tendermint Core is led primarily by [Interchain GmbH](https://interchain.berlin/). Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit. The Tendermint trademark is owned by Tendermint Inc, the for-profit entity that also maintains this website.', links: [ { title: 'Documentation', diff --git a/docs/DEV_SESSIONS.md b/docs/DEV_SESSIONS.md deleted file mode 100644 index 1ff4e3641e..0000000000 --- a/docs/DEV_SESSIONS.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -order: 1 ---- - -# Developer Sessions - -The Tendermint Core developer call is comprised of both [Interchain -Foundation](http://interchain.io/) and [All in Bits](https://tendermint.com/) -team members discussing the development of [Tendermint -BFT](https://github.com/tendermint/tendermint) and related research. The goal -of the Tendermint Core developer calls is to provide transparency into the -decision making process, technical information, update cycles etc. 
- -## List - -| Date | Topic | Link(s) | -| -------------- | ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | -| October 2019 | ABCI Overview (2/2) | [Youtube](https://www.youtube.com/watch?v=K3-E5wj2jA8) | -| October 2019 | ABCI Overview (1/2) | [YouTube](https://www.youtube.com/watch?v=I3OnA8yCHl4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) | -| September 2019 | IAVL+ Presentation | [YouTube](https://www.youtube.com/watch?v=e5wwBaCTc9Y&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=2) | -| September 2019 | Tendermint Dev Session - Blockchain Reactor in TLA+ | [YouTube](https://www.youtube.com/watch?v=q0e0pEQ5aiY&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=3) | -| September 2019 | Tendermint Code Review - SkipTimeoutCommit & Block Rollback | [YouTube](https://www.youtube.com/watch?v=MCo_oH7rys8&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | -| September 2019 | Tendermint Evidence Handling | [YouTube](https://www.youtube.com/watch?v=-4H3_DVlYRk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | -| August 2019 | Part Three: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=whyL6UrKe7I&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | -| August 2019 | Fork Accountability | [YouTube](https://www.youtube.com/watch?v=Jph-4PGtdPo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | -| July 2019 | Part Two: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=gTjG7jNNdKQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | -| July 2019 | Part One: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=C6fH_sgPJzA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | -| June 2019 | Testnet Deployments | [YouTube](https://www.youtube.com/watch?v=gYA6no7tRlM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | -| June 2019 | Blockchain Reactor Refactor | 
[YouTube](https://www.youtube.com/watch?v=JLBGH8yxABk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | -| June 2019 | Tendermint Rust Libraries | [YouTube](https://www.youtube.com/watch?v=-WXKdyoGHwA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | -| May 2019 | Merkle Tree Deep Dive | [YouTube](https://www.youtube.com/watch?v=L3bt2Uw8ICg&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | -| May 2019 | Remote Signer Refactor | [YouTube](https://www.youtube.com/watch?v=eUyXXEEuBzQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=12) | -| May 2019 | Introduction to Ansible | [YouTube](https://www.youtube.com/watch?v=72clQLjzPg4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=14&t=0s) | -| April 2019 | Tendermint State Sync Design Discussion | [YouTube](https://www.youtube.com/watch?v=4k23j2QHwrM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | -| April 2019 | ADR-036 - Blockchain Reactor Refactor | [YouTube](https://www.youtube.com/watch?v=TW2xC1LwEkE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | -| April 2019 | Verifying Distributed Algorithms | [YouTube](https://www.youtube.com/watch?v=tMd4lgPVBxE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | -| April 2019 | Byzantine Model Checker Presentation | [YouTube](https://www.youtube.com/watch?v=rdXl4VCQyow&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | -| January 2019 | Proposer Selection in Idris | [YouTube](https://www.youtube.com/watch?v=hWZdc9c1aH8&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | -| January 2019 | Current Mempool Design | [YouTube](https://www.youtube.com/watch?v=--iGIYYiLu4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | -| December 2018 | ABCI Proxy App | [YouTube](https://www.youtube.com/watch?v=s6sQ2HOVHdo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | -| October 2018 | DB Performance | [YouTube](https://www.youtube.com/watch?v=jVSNHi4l0fQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | -| October 2018 | Alternative Mempool Algorithms | 
[YouTube](https://www.youtube.com/watch?v=XxH5ZtM4vMM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=2) | -| October 2018 | Tendermint Termination | [YouTube](https://www.youtube.com/watch?v=YBZjecfjeIk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) | diff --git a/docs/architecture/README.md b/docs/architecture/README.md index a0b95d8803..bce77b2c71 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -63,7 +63,7 @@ Note the context/background should be written in the present tense. - [ADR-034-Priv-Validator-File-Structure](./adr-034-priv-validator-file-structure.md) - [ADR-035-Documentation](./adr-035-documentation.md) - [ADR-037-Deliver-Block](./adr-037-deliver-block.md) -- [ADR-038-non-zero-start-height](./adr-038-non-zero-start-height.md) +- [ADR-038-Non-Zero-Start-Height](./adr-038-non-zero-start-height.md) - [ADR-039-Peer-Behaviour](./adr-039-peer-behaviour.md) - [ADR-041-Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md) - [ADR-043-Blockchain-RiRi-Org](./adr-043-blockchain-riri-org.md) @@ -74,8 +74,13 @@ Note the context/background should be written in the present tense. 
- [ADR-051-Double-Signing-Risk-Reduction](./adr-051-double-signing-risk-reduction.md) - [ADR-052-Tendermint-Mode](./adr-052-tendermint-mode.md) - [ADR-053-State-Sync-Prototype](./adr-053-state-sync-prototype.md) -- [ADR-054-crypto-encoding-2](./adr-054-crypto-encoding-2.md) -- [ADR-055-protobuf-design](./adr-055-protobuf-design.md) -- [ADR-056-light-client-amnesia-attacks](./adr-056-light-client-amnesia-attacks) +- [ADR-054-Crypto-Encoding-2](./adr-054-crypto-encoding-2.md) +- [ADR-055-Protobuf-Design](./adr-055-protobuf-design.md) +- [ADR-056-Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks) - [ADR-057-RPC](./adr-057-RPC.md) -- [ADR-058-event-hashing](./adr-058-event-hashing.md) +- [ADR-058-Event-Hashing](./adr-058-event-hashing.md) +- [ADR-059-Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md) +- [ADR-060-Go-API-Stability](./adr-060-go-api-stability.md) +- [ADR-061-P2P-Refactor-Scope](./adr-061-p2p-refactor-scope.md) +- [ADR-062-P2P-Architecture](./adr-062-p2p-architecture.md) +- [ADR-063-Privval-gRPC](./adr-063-privval-grpc.md) diff --git a/docs/architecture/adr-034-priv-validator-file-structure.md b/docs/architecture/adr-034-priv-validator-file-structure.md index 83160bfb8b..8eb7464b4d 100644 --- a/docs/architecture/adr-034-priv-validator-file-structure.md +++ b/docs/architecture/adr-034-priv-validator-file-structure.md @@ -57,7 +57,7 @@ What we need to do next is changing the methods of `FilePV`. ## Status -Draft. +Accepted and implemented in [#2870](https://github.com/tendermint/tendermint/pull/2870). 
## Consequences diff --git a/docs/architecture/adr-056-light-client-amnesia-attacks.md b/docs/architecture/adr-056-light-client-amnesia-attacks.md index 218ad32eb9..b2a2680520 100644 --- a/docs/architecture/adr-056-light-client-amnesia-attacks.md +++ b/docs/architecture/adr-056-light-client-amnesia-attacks.md @@ -160,7 +160,7 @@ When, `state.LastBlockHeight > PotentialAmnesiaEvidence.timestamp + ProofTrialPe *NOTE: Even before the evidence is proposed and committed, the off-chain process of gossiping valid evidence could be enough for honest nodes to recognize the fork and halt.* -Other validators will vote if: +Other validators will vote `nil` if: - The Amnesia Evidence is not valid - The Amensia Evidence is not within their own trial period i.e. too soon. diff --git a/docs/architecture/adr-061-p2p-refactor-scope.md b/docs/architecture/adr-061-p2p-refactor-scope.md new file mode 100644 index 0000000000..7a9cb04bee --- /dev/null +++ b/docs/architecture/adr-061-p2p-refactor-scope.md @@ -0,0 +1,109 @@ +# ADR 061: P2P Refactor Scope + +## Changelog + +- 2020-10-30: Initial version (@erikgrinaker) + +## Context + +The `p2p` package responsible for peer-to-peer networking is rather old and has a number of weaknesses, including tight coupling, leaky abstractions, lack of tests, DoS vulnerabilites, poor performance, custom protocols, and incorrect behavior. A refactor has been discussed for several years ([#2067](https://github.com/tendermint/tendermint/issues/2067)). + +Informal Systems are also building a Rust implementation of Tendermint, [Tendermint-rs](https://github.com/informalsystems/tendermint-rs), and plan to implement P2P networking support over the next year. As part of this work, they have requested adopting e.g. [QUIC](https://datatracker.ietf.org/doc/draft-ietf-quic-transport/) as a transport protocol instead of implementing the custom application-level `MConnection` stream multiplexing protocol that Tendermint currently uses. 
+ +This ADR summarizes recent discussion with stakeholders on the scope of a P2P refactor. Specific designs and implementations will be submitted as separate ADRs. + +## Alternative Approaches + +There have been recurring proposals to adopt [LibP2P](https://libp2p.io) instead of maintaining our own P2P networking stack (see [#3696](https://github.com/tendermint/tendermint/issues/3696)). While this appears to be a good idea in principle, it would be a highly breaking protocol change, there are indications that we might have to fork and modify LibP2P, and there are concerns about the abstractions used. + +In discussions with Informal Systems we decided to begin with incremental improvements to the current P2P stack, add support for pluggable transports, and then gradually start experimenting with LibP2P as a transport layer. If this proves successful, we can consider adopting it for higher-level components at a later time. + +## Decision + +The P2P stack will be refactored and improved iteratively, in several phases: + +* **Phase 1:** code and API refactoring, maintaining protocol compatibility as far as possible. + +* **Phase 2:** additional transports and incremental protocol improvements. + +* **Phase 3:** disruptive protocol changes. + +The scope of phases 2 and 3 is still uncertain, and will be revisited once the preceding phases have been completed as we'll have a better sense of requirements and challenges. + +## Detailed Design + +Separate ADRs will be submitted for specific designs and changes in each phase, following research and prototyping. Below are objectives in order of priority. + +### Phase 1: Code and API Refactoring + +This phase will focus on improving the internal abstractions and implementations in the `p2p` package. As far as possible, it should not change the P2P protocol in a backwards-incompatible way. + +* Cleaner, decoupled abstractions for e.g. `Reactor`, `Switch`, and `Peer`. 
[#2067](https://github.com/tendermint/tendermint/issues/2067) [#5287](https://github.com/tendermint/tendermint/issues/5287) [#3833](https://github.com/tendermint/tendermint/issues/3833) + * Reactors should receive messages in separate goroutines or via buffered channels. [#2888](https://github.com/tendermint/tendermint/issues/2888) +* Improved peer lifecycle management. [#3679](https://github.com/tendermint/tendermint/issues/3679) [#3719](https://github.com/tendermint/tendermint/issues/3719) [#3653](https://github.com/tendermint/tendermint/issues/3653) [#3540](https://github.com/tendermint/tendermint/issues/3540) [#3183](https://github.com/tendermint/tendermint/issues/3183) [#3081](https://github.com/tendermint/tendermint/issues/3081) [#1356](https://github.com/tendermint/tendermint/issues/1356) + * Peer prioritization. [#2860](https://github.com/tendermint/tendermint/issues/2860) [#2041](https://github.com/tendermint/tendermint/issues/2041) +* Pluggable transports, with `MConnection` as one implementation. [#5587](https://github.com/tendermint/tendermint/issues/5587) [#2430](https://github.com/tendermint/tendermint/issues/2430) [#805](https://github.com/tendermint/tendermint/issues/805) +* Improved peer address handling. + * Address book refactor. [#4848](https://github.com/tendermint/tendermint/issues/4848) [#2661](https://github.com/tendermint/tendermint/issues/2661) + * Transport-agnostic peer addressing. [#5587](https://github.com/tendermint/tendermint/issues/5587) [#3782](https://github.com/tendermint/tendermint/issues/3782) [#3692](https://github.com/tendermint/tendermint/issues/3692) + * Improved detection and advertisement of own address. 
[#5588](https://github.com/tendermint/tendermint/issues/5588) [#4260](https://github.com/tendermint/tendermint/issues/4260) [#3716](https://github.com/tendermint/tendermint/issues/3716) [#1727](https://github.com/tendermint/tendermint/issues/1727) + * Support multiple IPs per peer. [#1521](https://github.com/tendermint/tendermint/issues/1521) [#2317](https://github.com/tendermint/tendermint/issues/2317) + +The refactor should attempt to address the following secondary objectives: testability, observability, performance, security, quality-of-service, backpressure, and DoS resilience. Much of this will be revisited as explicit objectives in phase 2. + +Ideally, the refactor should happen incrementally, with regular merges to `master` every few weeks. This will take more time overall, and cause frequent breaking changes to internal Go APIs, but it reduces the branch drift and gets the code tested sooner and more broadly. + +### Phase 2: Additional Transports and Protocol Improvements + +This phase will focus on protocol improvements and other breaking changes. The following are considered proposals that will need to be evaluated separately once the refactor is done. Additional proposals are likely to be added during phase 1. + +* QUIC transport. [#198](https://github.com/tendermint/spec/issues/198) +* Noise protocol for secret connection handshake. [#5589](https://github.com/tendermint/tendermint/issues/5589) [#3340](https://github.com/tendermint/tendermint/issues/3340) +* Peer ID in connection handshake. [#5590](https://github.com/tendermint/tendermint/issues/5590) +* Peer and service discovery (e.g. RPC nodes, state sync snapshots). [#5481](https://github.com/tendermint/tendermint/issues/5481) [#4583](https://github.com/tendermint/tendermint/issues/4583) +* Rate-limiting, backpressure, and QoS scheduling. 
[#4753](https://github.com/tendermint/tendermint/issues/4753) [#2338](https://github.com/tendermint/tendermint/issues/2338) +* Compression. [#2375](https://github.com/tendermint/tendermint/issues/2375) +* Improved metrics and tracing. [#3849](https://github.com/tendermint/tendermint/issues/3849) [#2600](https://github.com/tendermint/tendermint/issues/2600) +* Simplified P2P configuration options. + +### Phase 3: Disruptive Protocol Changes + +This phase covers speculative, wide-reaching proposals that are poorly defined and highly uncertain. They will be evaluated once the previous phases are done. + +* Adopt LibP2P. [#3696](https://github.com/tendermint/tendermint/issues/3696) +* Allow cross-reactor communication, possibly without channels. +* Dynamic channel advertisment, as reactors are enabled/disabled. [#4394](https://github.com/tendermint/tendermint/issues/4394) [#1148](https://github.com/tendermint/tendermint/issues/1148) +* Pubsub-style networking topology and pattern. +* Support multiple chain IDs in the same network. + +## Status + +Accepted + +## Consequences + +### Positive + +* Cleaner, simpler architecture that's easier to reason about and test, and thus hopefully less buggy. + +* Improved performance and robustness. + +* Reduced maintenance burden and increased interoperability by the possible adoption of standardized protocols such as QUIC and Noise. + +* Improved usability, with better observability, simpler configuration, and more automation (e.g. peer/service/address discovery, rate-limiting, and backpressure). + +### Negative + +* Maintaining our own P2P networking stack is resource-intensive. + +* Abstracting away the underlying transport may prevent usage of advanced transport features. + +* Breaking changes to APIs and protocols are disruptive to users. + +## References + +See issue links above. 
+ +- [#2067: P2P Refactor](https://github.com/tendermint/tendermint/issues/2067) + +- [P2P refactor brainstorm document](https://docs.google.com/document/d/1FUTADZyLnwA9z7ndayuhAdAFRKujhh_y73D0ZFdKiOQ/edit?pli=1#) diff --git a/docs/architecture/adr-062-p2p-architecture.md b/docs/architecture/adr-062-p2p-architecture.md new file mode 100644 index 0000000000..5fae7301de --- /dev/null +++ b/docs/architecture/adr-062-p2p-architecture.md @@ -0,0 +1,531 @@ +# ADR 062: P2P Architecture and Abstractions + +## Changelog + +- 2020-11-09: Initial version (@erikgrinaker) + +- 2020-11-13: Remove stream IDs, move peer errors onto channel, note on moving PEX into core (@erikgrinaker) + +- 2020-11-16: Notes on recommended reactor implementation patterns, approve ADR (@erikgrinaker) + +## Context + +In [ADR 061](adr-061-p2p-refactor-scope.md) we decided to refactor the peer-to-peer (P2P) networking stack. The first phase is to redesign and refactor the internal P2P architecture, while retaining protocol compatibility as far as possible. + +## Alternative Approaches + +Several variations of the proposed design were considered, including e.g. calling interface methods instead of passing messages (like the current architecture), merging channels with streams, exposing the internal peer data structure to reactors, being message format-agnostic via arbitrary codecs, and so on. This design was chosen because it has very loose coupling, is simpler to reason about and more convenient to use, avoids race conditions and lock contention for internal data structures, gives reactors better control of message ordering and processing semantics, and allows for QoS scheduling and backpressure in a very natural way. 
+ +[multiaddr](https://github.com/multiformats/multiaddr) was considered as a transport-agnostic peer address format over regular URLs, but it does not appear to have very widespread adoption, and advanced features like protocol encapsulation and tunneling do not appear to be immediately useful to us. + +There were also proposals to use LibP2P instead of maintaining our own P2P stack, which were rejected (for now) in [ADR 061](adr-061-p2p-refactor-scope.md). + +## Decision + +The P2P stack will be redesigned as a message-oriented architecture, primarily relying on Go channels for communication and scheduling. It will use IO stream transports to exchange raw bytes with individual peers, bidirectional peer-addressable channels to send and receive Protobuf messages, and a router to route messages between reactors and peers. Message passing is asynchronous with at-most-once delivery. + +## Detailed Design + +This ADR is primarily concerned with the architecture and interfaces of the P2P stack, not implementation details. Separate ADRs may be submitted for individual components, since implementation may be non-trivial. The interfaces described here should therefore be considered a rough architecture outline, not a complete and final design. + +Primary design objectives have been: + +* Loose coupling between components, for a simpler, more robust, and test-friendly architecture. +* Pluggable transports (not necessarily networked). +* Better scheduling of messages, with improved prioritization, backpressure, and performance. +* Centralized peer lifecycle and connection management. +* Better peer address detection, advertisement, and exchange. +* Wire-level backwards compatibility with current P2P network protocols, except where it proves too obstructive. + +The main abstractions in the new stack are: + +* `peer`: A node in the network, uniquely identified by a `PeerID` and stored in a `peerStore`. 
+* `Transport`: An arbitrary mechanism to exchange bytes with a peer using IO `Stream`s across a `Connection`. +* `Channel`: A bidirectional channel to asynchronously exchange Protobuf messages with peers addressed with `PeerID`. +* `Router`: Maintains transport connections to relevant peers and routes channel messages. +* Reactor: A design pattern loosely defined as "something which listens on a channel and reacts to messages". + +These abstractions are illustrated in the following diagram (representing the internals of node A) and described in detail below. + +![P2P Architecture Diagram](img/adr-062-architecture.svg) + +### Transports + +Transports are arbitrary mechanisms for exchanging raw bytes with a peer. For example, a gRPC transport would connect to a peer over TCP/IP and send data using the gRPC protocol, while an in-memory transport might communicate with a peer running in another goroutine using internal byte buffers. Note that transports don't have a notion of a `peer` as such - instead, they communicate with an arbitrary endpoint address (e.g. IP address and port number), to decouple them from the rest of the P2P stack. + +Transports must satisfy the following requirements: + +* Be connection-oriented, and support both listening for inbound connections and making outbound connections using endpoint addresses. + +* Support multiple logical IO streams within a single connection, to take full advantage of protocols with native stream support. For example, QUIC supports multiple independent streams, while HTTP/2 and MConn multiplex logical streams onto a single TCP connection. + +* Provide the public key of the peer, and possibly encrypt or sign the traffic as appropriate. This should be compared with known data (e.g. the peer ID) to authenticate the peer and avoid man-in-the-middle attacks. 
+ +The initial transport implementation will be a port of the current MConn protocol currently used by Tendermint, and should be backwards-compatible at the wire level as far as possible. This will be followed by an in-memory transport for testing, and a QUIC transport that may eventually replace MConn. + +The `Transport` interface is: + +```go +// Transport is an arbitrary mechanism for exchanging bytes with a peer. +type Transport interface { + // Accept waits for the next inbound connection on a listening endpoint. + Accept(context.Context) (Connection, error) + + // Dial creates an outbound connection to an endpoint. + Dial(context.Context, Endpoint) (Connection, error) + + // Endpoints lists endpoints the transport is listening on. Any endpoint IP + // addresses do not need to be normalized in any way (e.g. 0.0.0.0 is + // valid), as they should be preprocessed before being advertised. + Endpoints() []Endpoint +} +``` + +How the transport configures listening is transport-dependent, and not covered by the interface. This typically happens during transport construction, where a single instance of the transport is created and set to listen on an appropriate network interface before being passed to the router. + +#### Endpoints + +`Endpoint` represents a transport endpoint (e.g. an IP address and port). A connection always has two endpoints: one at the local node and one at the remote peer. Outbound connections to remote endpoints are made via `Dial()`, and inbound connections to listening endpoints are returned via `Accept()`. + +The `Endpoint` struct is: + +```go +// Endpoint represents a transport connection endpoint, either local or remote. +type Endpoint struct { + // Protocol specifies the transport protocol, used by the router to pick a + // transport for an endpoint. + Protocol Protocol + + // Path is an optional, arbitrary transport-specific path or identifier. + Path string + + // IP is an IP address (v4 or v6) to connect to. 
If set, this defines the + // endpoint as a networked endpoint. + IP net.IP + + // Port is a network port (either TCP or UDP). If not set, a default port + // may be used depending on the protocol. + Port uint16 +} + +// Protocol identifies a transport protocol. +type Protocol string +``` + +Endpoints are arbitrary transport-specific addresses, but if they are networked they must use IP addresses and thus rely on IP as a fundamental packet routing protocol. This enables policies for address discovery, advertisement, and exchange - for example, a private `192.168.0.0/24` IP address should only be advertised to peers on that IP network, while the public address `8.8.8.8` may be advertised to all peers. Similarly, any port numbers if given must represent TCP and/or UDP port numbers, in order to use [UPnP](https://en.wikipedia.org/wiki/Universal_Plug_and_Play) to autoconfigure e.g. NAT gateways. + +Non-networked endpoints (without an IP address) are considered local, and will only be advertised to other peers connecting via the same protocol. For example, an in-memory transport used for testing might have `Endpoint{Protocol: "memory", Path: "foo"}` as an address for the node "foo", and this should only be advertised to other nodes using `Protocol: "memory"`. + +#### Connections and Streams + +A connection represents an established transport connection between two endpoints (and thus two nodes), which can be used to exchange bytes via logically distinct IO streams. Connections are set up either via `Transport.Dial()` (outbound) or `Transport.Accept()` (inbound). The caller is responsible for verifying the remote peer's public key as returned by the connection, following the current MConn protocol behavior for now. + +Data is exchanged over IO streams created with `Connection.Stream()`. These implement the standard Go `io.Reader` and `io.Writer` interfaces to read and write bytes. Transports are free to choose how to implement such streams, e.g. 
by taking advantage of native stream support in the underlying protocol or through multiplexing. + +`Connection` and the related `Stream` interfaces are: + +```go +// Connection represents an established connection between two endpoints. +type Connection interface { + // Stream creates a new logically distinct IO stream within the connection. + Stream() (Stream, error) + + // LocalEndpoint returns the local endpoint for the connection. + LocalEndpoint() Endpoint + + // RemoteEndpoint returns the remote endpoint for the connection. + RemoteEndpoint() Endpoint + + // PubKey returns the public key of the remote peer. + PubKey() crypto.PubKey + + // Close closes the connection. + Close() error +} + +// Stream represents a single logical IO stream within a connection. +type Stream interface { + io.Reader // Read([]byte) (int, error) + io.Writer // Write([]byte) (int, error) + io.Closer // Close() error +} +``` + +### Peers + +Peers are other Tendermint network nodes. Each peer is identified by a unique `PeerID`, and has a set of `PeerAddress` addresses expressed as URLs that they can be reached at. Examples of peer addresses might be e.g.: + +* `mconn://b10c@host.domain.com:25567/path` +* `unix:///var/run/tendermint/peer.sock` +* `memory:testpeer` + +Addresses are resolved into one or more transport endpoints, e.g. by resolving DNS hostnames into IP addresses (which should be refreshed periodically). Peers should always be expressed as address URLs, and never as endpoints which are a lower-level construct. + +```go +// PeerID is a unique peer ID, generally expressed in hex form. +type PeerID []byte + +// PeerAddress is a peer address URL. The User field, if set, gives the +// hex-encoded remote PeerID, which should be verified with the remote peer's +// public key as returned by the connection. +type PeerAddress url.URL + +// Resolve resolves a PeerAddress into a set of Endpoints, typically by +// expanding out a DNS name in Host to its IP addresses. 
Field mapping: +// +// Scheme → Endpoint.Protocol +// Host → Endpoint.IP +// Port → Endpoint.Port +// Path+Query+Fragment,Opaque → Endpoint.Path +// +func (a PeerAddress) Resolve(ctx context.Context) []Endpoint { return nil } +``` + +The P2P stack needs to track a lot of internal information about peers, such as endpoints, status, priorities, and so on. This is done in an internal `peer` struct, which should not be exposed outside of the `p2p` package (e.g. to reactors) in order to avoid race conditions and lock contention - other packages should use `PeerID`. + +The `peer` struct might look like the following, but is intentionally underspecified and will depend on implementation requirements (for example, it will almost certainly have to track statistics about connection failures and retries): + +```go +// peer tracks internal status information about a peer. +type peer struct { + ID PeerID + Status PeerStatus + Priority PeerPriority + Endpoints map[PeerAddress][]Endpoint // Resolved endpoints by address. +} + +// PeerStatus specifies peer statuses. +type PeerStatus string + +const ( + PeerStatusNew = "new" // New peer which we haven't tried to contact yet. + PeerStatusUp = "up" // Peer which we have an active connection to. + PeerStatusDown = "down" // Peer which we're temporarily disconnected from. + PeerStatusRemoved = "removed" // Peer which has been removed. + PeerStatusBanned = "banned" // Peer which is banned for misbehavior. +) + +// PeerPriority specifies peer priorities. +type PeerPriority int + +const ( + PeerPriorityNormal PeerPriority = iota + 1 + PeerPriorityValidator + PeerPriorityPersistent +) +``` + +Peer information is stored in a `peerStore`, which may be persisted in an underlying database, and will replace the current address book either partially or in full. 
It is kept internal to avoid race conditions and tight coupling, and should at the very least contain basic CRUD functionality as outlined below, but will likely need additional functionality and is intentionally underspecified: + +```go +// peerStore contains information about peers, possibly persisted to disk. +type peerStore struct { + peers map[string]*peer // Entire set in memory, with PeerID.String() keys. + db dbm.DB // Database for persistence, if non-nil. +} + +func (p *peerStore) Delete(id PeerID) error { return nil } +func (p *peerStore) Get(id PeerID) (peer, bool) { return peer{}, false } +func (p *peerStore) List() []peer { return nil } +func (p *peerStore) Set(peer peer) error { return nil } +``` + +Peer address detection, advertisement and exchange (including detection of externally-reachable addresses via e.g. NAT gateways) is out of scope for this ADR, but may be covered in a separate ADR. The current PEX reactor should probably be absorbed into the core P2P stack and protocol instead of running as a separate reactor, since this needs to mutate the core peer data structures and will thus be tightly coupled with the router. + +### Channels + +While low-level data exchange happens via transport IO streams, the high-level API is based on a bidirectional `Channel` that can send and receive Protobuf messages addressed by `PeerID`. A channel is identified by an arbitrary `ChannelID` identifier, and can exchange Protobuf messages of one specific type (since the type to unmarshal into must be known). Message delivery is asynchronous and at-most-once. + +The channel can also be used to report peer errors, e.g. when receiving an invalid or malignant message. This may cause the peer to be disconnected or banned depending on the router's policy. + +A `Channel` has this interface: + +```go +// Channel is a bidirectional channel for Protobuf message exchange with peers. +type Channel struct { + // ID contains the channel ID. 
+ ID ChannelID + + // messageType specifies the type of messages exchanged via the channel, and + // is used e.g. for automatic unmarshaling. + messageType proto.Message + + // In is a channel for receiving inbound messages. Envelope.From is always + // set. + In <-chan Envelope + + // Out is a channel for sending outbound messages. Envelope.To or Broadcast + // must be set, otherwise the message is discarded. + Out chan<- Envelope + + // Error is a channel for reporting peer errors to the router, typically used + // when peers send an invalid or malignant message. + Error chan<- PeerError +} + +// Close closes the channel, and is equivalent to close(Channel.Out). This will +// cause Channel.In to be closed when appropriate. The ID can then be reused. +func (c *Channel) Close() error { return nil } + +// ChannelID is an arbitrary channel ID. +type ChannelID uint16 + +// Envelope specifies the message receiver and sender. +type Envelope struct { + From PeerID // Message sender, or empty for outbound messages. + To PeerID // Message receiver, or empty for inbound messages. + Broadcast bool // Send message to all connected peers, ignoring To. + Message proto.Message // Payload. +} + +// PeerError is a peer error reported by a reactor via the Error channel. The +// severity may cause the peer to be disconnected or banned depending on policy. +type PeerError struct { + PeerID PeerID + Err error + Severity PeerErrorSeverity +} + +// PeerErrorSeverity determines the severity of a peer error. +type PeerErrorSeverity string + +const ( + PeerErrorSeverityLow PeerErrorSeverity = "low" // Mostly ignored. + PeerErrorSeverityHigh PeerErrorSeverity = "high" // May disconnect. + PeerErrorSeverityCritical PeerErrorSeverity = "critical" // Ban. +) +``` + +A channel can reach any connected peer, and is implemented using transport streams against each individual peer, with an initial handshake to exchange the channel ID and any other metadata. 
The channel will automatically (un)marshal Protobuf to byte slices and use length-prefixed framing (the de facto standard for Protobuf streams) when writing them to the stream. + +Message scheduling and queueing is left as an implementation detail, and can use any number of algorithms such as FIFO, round-robin, priority queues, etc. Since message delivery is not guaranteed, both inbound and outbound messages may be dropped, buffered, or blocked as appropriate. + +Since a channel can only exchange messages of a single type, it is often useful to use a wrapper message type with e.g. a Protobuf `oneof` field that specifies a set of inner message types that it can contain. The channel can automatically perform this (un)wrapping if the outer message type implements the `Wrapper` interface (see [Reactor Example](#reactor-example) for an example): + +```go +// Wrapper is a Protobuf message that can contain a variety of inner messages. +// If a Channel's message type implements Wrapper, the channel will +// automatically (un)wrap passed messages using the container type, such that +// the channel can transparently support multiple message types. +type Wrapper interface { + // Wrap will take a message and wrap it in this one. + Wrap(proto.Message) error + + // Unwrap will unwrap the inner message contained in this message. + Unwrap() (proto.Message, error) +} +``` + +### Routers + +The router manages all P2P networking for a node, and is responsible for keeping track of network peers, maintaining transport connections, and routing channel messages. As such, it must do e.g. connection retries and backoff, message QoS scheduling and backpressure, peer quality assessments, and endpoint detection and advertisement. In addition, the router provides a mechanism to subscribe to peer updates (e.g. peers connecting or disconnecting), and handles reported peer errors from reactors. + +The implementation of the router is likely to be non-trivial, and is intentionally unspecified here. 
A separate ADR will likely be submitted for this. It is unclear whether message routing/scheduling and peer lifecycle management can be split into two separate components, or if these need to be tightly coupled. + +The `Router` API is as follows: + +```go +// Router manages connections to peers and routes Protobuf messages between them +// and local reactors. It also provides peer status updates and error reporting. +type Router struct{} + +// NewRouter creates a new router, using the given peer store to track peers. +// Transports must be pre-initialized to listen on appropriate endpoints. +func NewRouter(peerStore *peerStore, transports map[Protocol]Transport) *Router { return nil } + +// Channel opens a new channel with the given ID. messageType should be an empty +// Protobuf message of the type that will be passed through the channel. The +// message can implement Wrapper for automatic message (un)wrapping. +func (r *Router) Channel(id ChannelID, messageType proto.Message) (*Channel, error) { return nil, nil } + +// PeerUpdates returns a channel with peer updates. The caller must cancel the +// context to end the subscription, and keep consuming messages in a timely +// fashion until the channel is closed to avoid blocking updates. +func (r *Router) PeerUpdates(ctx context.Context) PeerUpdates { return nil } + +// PeerUpdates is a channel for receiving peer updates. +type PeerUpdates <-chan PeerUpdate + +// PeerUpdate is a peer status update for reactors. +type PeerUpdate struct { + PeerID PeerID + Status PeerStatus +} +``` + +### Reactor Example + +While reactors are a first-class concept in the current P2P stack (i.e. there is an explicit `p2p.Reactor` interface), they will simply be a design pattern in the new stack, loosely defined as "something which listens on a channel and reacts to messages". + +Since reactors have very few formal constraints, they can be implemented in a variety of ways. 
There is currently no recommended pattern for implementing reactors, to avoid overspecification and scope creep in this ADR. However, prototyping and developing a reactor pattern should be done early during implementation, to make sure reactors built using the `Channel` interface can satisfy the needs for convenience, deterministic tests, and reliability. + +Below is a trivial example of a simple echo reactor implemented as a function. The reactor will exchange the following Protobuf messages: + +```protobuf +message EchoMessage { + oneof inner { + PingMessage ping = 1; + PongMessage pong = 2; + } +} + +message PingMessage { + string content = 1; +} + +message PongMessage { + string content = 1; +} +``` + +Implementing the `Wrapper` interface for `EchoMessage` allows transparently passing `PingMessage` and `PongMessage` through the channel, where it will automatically be (un)wrapped in an `EchoMessage`: + +```go +func (m *EchoMessage) Wrap(inner proto.Message) error { + switch inner := inner.(type) { + case *PingMessage: + m.Inner = &EchoMessage_PingMessage{Ping: inner} + case *PongMessage: + m.Inner = &EchoMessage_PongMessage{Pong: inner} + default: + return fmt.Errorf("unknown message %T", inner) + } + return nil +} + +func (m *EchoMessage) Unwrap() (proto.Message, error) { + switch inner := m.Inner.(type) { + case *EchoMessage_PingMessage: + return inner.Ping, nil + case *EchoMessage_PongMessage: + return inner.Pong, nil + default: + return nil, fmt.Errorf("unknown message %T", inner) + } +} +``` + +The reactor itself would be implemented e.g. like this: + +```go +// RunEchoReactor wires up an echo reactor to a router and runs it. 
+func RunEchoReactor(router *p2p.Router) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + channel, err := router.Channel(1, &EchoMessage{}) + if err != nil { + return err + } + defer channel.Close() + + return EchoReactor(ctx, channel, router.PeerUpdates(ctx)) +} + +// EchoReactor provides an echo service, pinging all known peers until cancelled. +func EchoReactor(ctx context.Context, channel *p2p.Channel, peerUpdates p2p.PeerUpdates) error { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + // Send ping message to all known peers every 5 seconds. + case <-ticker.C: + channel.Out <- Envelope{ + Broadcast: true, + Message: &PingMessage{Content: "👋"}, + } + + // When we receive a message from a peer, either respond to ping, output + // pong, or report peer error on unknown message type. + case envelope := <-channel.In: + switch msg := envelope.Message.(type) { + case *PingMessage: + channel.Out <- Envelope{ + To: envelope.From, + Message: &PongMessage{Content: msg.Content}, + } + + case *PongMessage: + fmt.Printf("%q replied with %q\n", envelope.From, msg.Content) + + default: + channel.Error <- PeerError{ + PeerID: envelope.From, + Err: fmt.Errorf("unexpected message %T", msg), + Severity: PeerErrorSeverityLow, + } + } + + // Output info about any peer status changes. + case peerUpdate := <-peerUpdates: + fmt.Printf("Peer %q changed status to %q", peerUpdate.PeerID, peerUpdate.Status) + + // Exit when context is cancelled. + case <-ctx.Done(): + return nil + } + } +} +``` + +### Implementation Plan + +The existing P2P stack should be gradually migrated towards this design. The easiest path would likely be: + +1. Implement the `Channel` and `PeerUpdates` APIs as shims on top of the current `Switch` and `Peer` APIs, and rewrite all reactors to use them instead. + +2. Port the `privval` package to no longer use `SecretConnection` (e.g. 
by using gRPC instead), or temporarily duplicate its functionality. + +3. Rewrite the current MConn connection and transport code to use the new `Transport` API, and migrate existing code to use it instead. + +4. Implement the new `peer` and `peerStore` APIs, and either make the current address book a shim on top of these or replace it. + +5. Replace the existing `Switch` abstraction with the new `Router`. + +6. Move the PEX reactor and other address advertisement/exchange into the P2P core, possibly the `Router`. + +7. Consider rewriting and/or cleaning up reactors and other P2P-related code to make better use of the new abstractions. + +A note on backwards-compatibility: the current MConn protocol takes whole messages expressed as byte slices and splits them up into `PacketMsg` messages, where the final packet of a message has `PacketMsg.EOF` set. In order to maintain wire-compatibility with this protocol, the MConn transport needs to be aware of message boundaries, even though it does not care what the messages actually are. One way to handle this is to break abstraction boundaries and have the transport decode the input's length-prefixed message framing and use this to determine message boundaries, unless we accept breaking the protocol here. + +Similarly, implementing channel handshakes with the current MConn protocol would require doing an initial connection handshake as today and use that information to "fake" the local channel handshake without it hitting the wire. + +## Status + +Accepted + +## Consequences + +### Positive + +* Reduced coupling and simplified interfaces should lead to better understandability, increased reliability, and more testing. + +* Using message passing via Go channels gives better control of backpressure and quality-of-service scheduling. + +* Peer lifecycle and connection management is centralized in a single entity, making it easier to reason about. + +* Detection, advertisement, and exchange of node addresses will be improved. 
+ +* Additional transports (e.g. QUIC) can be implemented and used in parallel with the existing MConn protocol. + +* The P2P protocol will not be broken in the initial version, if possible. + +### Negative + +* Fully implementing the new design as intended is likely to require breaking changes to the P2P protocol at some point, although the initial implementation shouldn't. + +* Gradually migrating the existing stack and maintaining backwards-compatibility will be more labor-intensive than simply replacing the entire stack. + +* A complete overhaul of P2P internals is likely to cause temporary performance regressions and bugs as the implementation matures. + +* Hiding peer management information inside the `p2p` package may prevent certain functionality or require additional deliberate interfaces for information exchange, as a tradeoff to simplify the design, reduce coupling, and avoid race conditions and lock contention. + +### Neutral + +* Implementation details around e.g. peer management, message scheduling, and peer and endpoint advertisement are not yet determined. + +## References + +* [ADR 061: P2P Refactor Scope](adr-061-p2p-refactor-scope.md) +* [#5670 p2p: internal refactor and architecture redesign](https://github.com/tendermint/tendermint/issues/5670) diff --git a/docs/architecture/adr-063-privval-grpc.md b/docs/architecture/adr-063-privval-grpc.md new file mode 100644 index 0000000000..efbca5c6ec --- /dev/null +++ b/docs/architecture/adr-063-privval-grpc.md @@ -0,0 +1,108 @@ +# ADR 063: Privval gRPC + +## Changelog + +- 23/11/2020: Initial Version (@marbar3778) + +## Context + +Validators use remote signers to help secure their keys. This system is Tendermint's recommended way to secure validators, but the path to integration with Tendermint's private validator client is plagued with custom protocols. + +Tendermint uses its own custom secure connection protocol (`SecretConnection`) and a raw tcp/unix socket connection protocol.
The secure connection protocol until recently was exposed to man-in-the-middle attacks and can take longer to integrate if not using Golang. The raw tcp connection protocol is less custom, but has been causing minute issues with users. + +Migrating Tendermint's private validator client to a widely adopted protocol, gRPC, will ease the current maintenance and integration burden experienced with the current protocol. + +## Decision + +After discussing with multiple stakeholders, [gRPC](https://grpc.io/) was decided on to replace the current private validator protocol. gRPC is a widely adopted protocol in the micro-service and cloud infrastructure world. gRPC uses [protocol-buffers](https://developers.google.com/protocol-buffers) to describe its services, providing a language agnostic implementation. Tendermint uses protobuf for on disk and over the wire encoding already, making the integration with gRPC simpler. + +## Alternative Approaches + +- JSON-RPC: We did not consider JSON-RPC because Tendermint uses protobuf extensively making gRPC a natural choice. + +## Detailed Design + +With the recent integration of [Protobuf](https://developers.google.com/protocol-buffers) into Tendermint, the needed changes to migrate from the current private validator protocol to gRPC are not large. + +The [service definition](https://grpc.io/docs/what-is-grpc/core-concepts/#service-definition) for gRPC will be defined as: + +```proto + service PrivValidatorAPI { + rpc GetPubKey(tendermint.proto.privval.PubKeyRequest) returns (tendermint.proto.privval.PubKeyResponse); + rpc SignVote(tendermint.proto.privval.SignVoteRequest) returns (tendermint.proto.privval.SignedVoteResponse); + rpc SignProposal(tendermint.proto.privval.SignProposalRequest) returns (tendermint.proto.privval.SignedProposalResponse); + + message PubKeyRequest { + string chain_id = 1; + } + + // PubKeyResponse is a response message containing the public key.
+ message PubKeyResponse { + tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; + } + + // SignVoteRequest is a request to sign a vote + message SignVoteRequest { + tendermint.types.Vote vote = 1; + string chain_id = 2; + } + + // SignedVoteResponse is a response containing a signed vote or an error + message SignedVoteResponse { + tendermint.types.Vote vote = 1 [(gogoproto.nullable) = false]; + } + + // SignProposalRequest is a request to sign a proposal + message SignProposalRequest { + tendermint.types.Proposal proposal = 1; + string chain_id = 2; + } + + // SignedProposalResponse is a response containing a signed proposal or an error + message SignedProposalResponse { + tendermint.types.Proposal proposal = 1 [(gogoproto.nullable) = false]; + } +} +``` + +> Note: Remote Signer errors are removed in favor of [grpc status error codes](https://grpc.io/docs/guides/error/). + +In previous versions of the remote signer, Tendermint acted as the server and the remote signer as the client. In this process the client established a long lived connection providing a way for the server to make requests to the client. In the new version it has been simplified. Tendermint is the client and the remote signer is the server. This follows client and server architecture and simplifies the previous protocol. + +#### Keep Alive + +If you have worked on the private validator system you will see that we are removing the `PingRequest` and `PingResponse` messages. These messages were used to create functionality which kept the connection alive. With gRPC there is a [keep alive feature](https://github.com/grpc/grpc/blob/master/doc/keepalive.md) that will be added alongside the integration to provide the same functionality. + +#### Metrics + +Remote signers are crucial to operating secure and consistently up Validators. In the past there were no metrics to tell the operator if something is wrong other than the node not signing.
Integrating metrics into the client and provided server will be done with [prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). This will be integrated into the node's prometheus export for node operators. + +#### Security + +[TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) is widely adopted with the use of gRPC. There are various forms of TLS (one-way & two-way). One way is the client identifying who the server is, while two way is both parties identifying the other. For Tendermint's use case having both parties identifying each other adds an extra layer of security. This requires users to generate both client and server certificates for a TLS connection. + +An insecure option will be provided for users who do not wish to secure the connection. + +#### Upgrade Path + +This is a largely breaking change for validator operators. The optimal upgrade path would be to release gRPC in a minor release, allow key management systems to migrate to the new protocol. In the next major release the current system (raw tcp/unix) is removed. This allows users to migrate to the new system and not have to coordinate upgrading the key management system alongside a network upgrade. + +The upgrade of [tmkms](https://github.com/iqlusioninc/tmkms) will be coordinated with Iqlusion. They will be able to make the necessary upgrades to allow users to migrate to gRPC from the current protocol. + +## Status + +Proposed + +### Positive + +- Use an adopted standard for secure communication. (TLS) +- Use an adopted communication protocol. (gRPC) +- Requests are multiplexed onto the tcp connection. (http/2) +- Language agnostic service definition. + +### Negative + +- Users will need to generate certificates to use TLS.
(Added step) +- Users will need to find a gRPC-supported key management system + +### Neutral diff --git a/docs/architecture/img/adr-062-architecture.svg b/docs/architecture/img/adr-062-architecture.svg new file mode 100644 index 0000000000..1ad18a3e0c --- /dev/null +++ b/docs/architecture/img/adr-062-architecture.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/nodes/README.md b/docs/nodes/README.md new file mode 100644 index 0000000000..4a8815a151 --- /dev/null +++ b/docs/nodes/README.md @@ -0,0 +1,43 @@ +--- +order: 1 +parent: + title: Nodes + order: 4 +--- + +This section will focus on how to operate full nodes, validators and light clients. + +- [Node Types](#node-types) +- [Configuration](./configuration.md) + - [Configure State sync](./state_sync.md) +- [Validator Guides](./validators.md) + - [How to secure your keys](./validators.md#validator_keys) +- [Light Client guides](./light-client.md) + - [How to sync a light client](./light-client.md#) +- [Metrics](./metrics.md) + +## Node Types + +We will cover the various node types within Tendermint. + +### Full Node + + A full node is a node that participates in the network but will not help secure it. Full nodes can be used to store the entire state of a blockchain. For Tendermint there are two forms of state. First, blockchain state, this represents the blocks of a blockchain. Secondly, there is Application state, this represents the state that transactions modify. The knowledge of how a transaction can modify state is not held by Tendermint but rather the application on the other side of the ABCI boundary. + + > Note: If you have not read about the separation of consensus and application please take a few minutes to read up on it as it will provide a better understanding of many of the terms we use throughout the documentation. You can find more information on the ABCI [here](../app-dev/app-architecture.md).
+ + As a full node operator you are providing services to the network that help it come to consensus and help others catch up to the current block. Even though a full node only helps the network come to consensus it is important to secure your node from adversarial actors. We recommend using a firewall and a proxy if possible. Running a full node can be easy, but it varies from network to network. Verify your application's documentation prior to running a node. + +### Seed Nodes + + A seed node provides a node with a list of peers which a node can connect to. When starting a node you must provide at least one type of node to be able to connect to the desired network. By providing a seed node you will be able to populate your address book quickly. A seed node will not be kept as a peer but will disconnect from your node after it has provided a list of peers. + +### Sentry Node + + A sentry node is similar to a full node in almost every way. The difference is a sentry node will have one or more private peers. These peers may be validators or other full nodes in the network. A sentry node is meant to provide a layer of security for your validator, similar to how a firewall works with a computer. + +### Validators + +Validators are nodes that participate in the security of a network. Validators have an associated power in Tendermint, this power can represent stake in a [proof of stake](https://en.wikipedia.org/wiki/Proof_of_stake) system, reputation in [proof of authority](https://en.wikipedia.org/wiki/Proof_of_authority) or any sort of measurable unit. Running a secure and consistently online validator is crucial to a network's health. A validator must be secure and fault tolerant, it is recommended to run your validator with 2 or more sentry nodes. + +As a validator there is the potential to have your weight reduced, this is defined by the application. Tendermint is notified by the application if a validator should have their weight increased or reduced.
Applications have different types of malicious behavior which lead to slashing of the validator's power. Please check the documentation of the application you will be running in order to find more information. diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md new file mode 100644 index 0000000000..49a69c5bf0 --- /dev/null +++ b/docs/nodes/configuration.md @@ -0,0 +1,491 @@ +--- +order: 3 +--- + +# Configuration + +Tendermint Core can be configured via a TOML file in +`$TMHOME/config/config.toml`. Some of these parameters can be overridden by +command-line flags. For most users, the options in the `##### main base configuration options #####` are intended to be modified while config options +further below are intended for advanced power users. + +## Options + +The default configuration file created by `tendermint init` has all +the parameters set with their default values. It will look something +like the file below, however, double check by inspecting the +`config.toml` created with your version of `tendermint` installed: + +```toml +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable +# or --home cmd flag.
+ +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "anonymous" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,statesync:info,*:error" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the 
JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://127.0.0.1:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. 
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Migth be either absolute path or path related to tendermint's config directory. 
+# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Migth be either absolute path or path related to tendermint's config directory. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. 
+external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. 
+handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Options ### +####################################################### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +max_batch_bytes = 10485760 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted.
+# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +####################################################### +### Fast Sync Configuration Connections ### +####################################################### +[fastsync] + +# Fast Sync version to use: +# 1) "v0" (default) - the legacy fast sync implementation +# 2) "v2" - complete redesign of v0, optimized for testability & readability +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). 
+timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. 
+# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" + +``` + +## Empty blocks VS no empty blocks + +### create_empty_blocks = true + +If `create_empty_blocks` is set to `true` in your config, blocks will be +created ~ every second (with default consensus parameters). You can regulate +the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks. + +### create_empty_blocks = false + +In this setting, blocks are created when transactions received. + +Note after the block H, Tendermint creates something we call a "proof block" +(only if the application hash changed) H+1. The reason for this is to support +proofs. If you have a transaction in block H that changes the state to X, the +new application hash will only be included in block H+1. If after your +transaction is committed, you want to get a light-client proof for the new state +(X), you need the new block to be committed in order to do that because the new +block has the new application hash for the state X. That's why we make a new +(empty) block if the application hash changes. Otherwise, you won't be able to +make a proof for the new state. + +Plus, if you set `create_empty_blocks_interval` to something other than the +default (`0`), Tendermint will be creating empty blocks even in the absence of +transactions every `create_empty_blocks_interval`. For instance, with +`create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`, +Tendermint will only create blocks if there are transactions, or after waiting +30 seconds without receiving any transactions. 
+ +## Consensus timeouts explained + +There's a variety of information about timeouts in [Running in +production](./running-in-production.md) + +You can also find more detailed technical explanation in the spec: [The latest +gossip on BFT consensus](https://arxiv.org/abs/1807.04938). + +```toml +[consensus] +... + +timeout_propose = "3s" +timeout_propose_delta = "500ms" +timeout_prevote = "1s" +timeout_prevote_delta = "500ms" +timeout_precommit = "1s" +timeout_precommit_delta = "500ms" +timeout_commit = "1s" +``` + +Note that in a successful round, the only timeout that we absolutely wait no +matter what is `timeout_commit`. + +Here's a brief summary of the timeouts: + +- `timeout_propose` = how long we wait for a proposal block before prevoting + nil +- `timeout_propose_delta` = how much timeout_propose increases with each round +- `timeout_prevote` = how long we wait after receiving +2/3 prevotes for + anything (ie. not a single block or nil) +- `timeout_prevote_delta` = how much the timeout_prevote increases with each + round +- `timeout_precommit` = how long we wait after receiving +2/3 precommits for + anything (ie. not a single block or nil) +- `timeout_precommit_delta` = how much the timeout_precommit increases with + each round +- `timeout_commit` = how long we wait after committing a block, before starting + on the new height (this gives us a chance to receive some more precommits, + even though we already have +2/3) + +## P2P settings + +This section will cover settings within the p2p section of the `config.toml`. + +- `external_address` = is the address that will be advertised for other nodes to use. We recommend setting this field with your public IP and p2p port. +- `seeds` = is a list of comma separated seed nodes that you will connect upon a start and ask for peers. 
A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the network +- `persistent_peers` = is a list of comma separated peers that you will always want to be connected to. If you're already connected to the maximum number of peers, persistent peers will not be added. +- `max_num_inbound_peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). +- `max_num_outbound_peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection). +- `unconditional_peer_ids` = is similar to `persistent_peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node. +- `pex` = turns the peer exchange reactor on or off. Validator nodes will want the `pex` turned off so they do not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on. +- `seed_mode` = is used for when node operators want to run their node as a seed node. Seed nodes run a variation of the PeX protocol that disconnects from peers after sending them a list of peers to connect to. To minimize the server's usage, it is recommended to set the mempool's size to 0. +- `private_peer_ids` = is a comma separated list of node ids that you would not like exposed to other peers (ie. you will not tell other peers about the private_peer_ids). This can be filled with a validator's node ID.
diff --git a/docs/nodes/light-client.md b/docs/nodes/light-client.md new file mode 100644 index 0000000000..9e9e084522 --- /dev/null +++ b/docs/nodes/light-client.md @@ -0,0 +1,39 @@ +--- +order: 6 +--- + +# Configure a Light Client + +Tendermint comes with a built-in `tendermint light` command, which can be used +to run a light client proxy server, verifying Tendermint RPC. All calls that +can be tracked back to a block header by a proof will be verified before +passing them back to the caller. Other than that, it will present the same +interface as a full Tendermint node. + +You can start the light client proxy server by running `tendermint light `, +with a variety of flags to specify the primary node, the witness nodes (which cross-check +the information provided by the primary), the hash and height of the trusted header, +and more. + +For example: + +```bash +$ tendermint light supernova -p tcp://233.123.0.140:26657 \ + -w tcp://179.63.29.15:26657,tcp://144.165.223.135:26657 \ + --height=10 --hash=37E9A6DD3FA25E83B22C18835401E8E56088D0D7ABC6FD99FCDC920DD76C1C57 +``` + +For additional options, run `tendermint light --help`. 
+ +## Where to obtain trusted height & hash + +One way to obtain a semi-trusted hash & height is to query multiple full nodes +and compare their hashes: + +```bash +$ curl -s https://233.123.0.140:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" +{ + "height": "273", + "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" +} +``` diff --git a/docs/tendermint-core/local_config.png b/docs/nodes/local_config.png similarity index 100% rename from docs/tendermint-core/local_config.png rename to docs/nodes/local_config.png diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md new file mode 100644 index 0000000000..b817a7db1e --- /dev/null +++ b/docs/nodes/metrics.md @@ -0,0 +1,60 @@ +--- +order: 4 +--- + +# Metrics + +Tendermint can report and serve the Prometheus metrics, which in turn can +be consumed by Prometheus collector(s). + +This functionality is disabled by default. + +To enable the Prometheus metrics, set `instrumentation.prometheus=true` in your +config file. Metrics will be served under `/metrics` on port 26660 by default. +Listen address can be changed in the config file (see +`instrumentation.prometheus\_listen\_addr`).
+ +## List of available metrics + +The following metrics are available: + +| **Name** | **Type** | **Tags** | **Description** | +| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- | +| consensus_height | Gauge | | Height of the chain | +| consensus_validators | Gauge | | Number of validators | +| consensus_validators_power | Gauge | | Total voting power of all validators | +| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | +| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | +| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| consensus_missing_validators | Gauge | | Number of validators who did not sign | +| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | +| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | +| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | +| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| consensus_rounds | Gauge | | Number of rounds | +| consensus_num_txs | Gauge | | Number of transactions | +| consensus_total_txs | Gauge | | Total number of transactions committed | +| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer | +| consensus_latest_block_height | gauge | | /status sync_info number | +| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) | +| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | +| consensus_block_size_bytes | Gauge | | Block size in bytes | +| p2p_peers | Gauge | | Number of peers node's connected to | +| p2p_peer_receive_bytes_total | counter | peer_id, chID | 
number of bytes per channel received from a given peer | +| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer | +| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer | +| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id | +| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer | +| mempool_size | Gauge | | Number of uncommitted transactions | +| mempool_tx_size_bytes | histogram | | transaction sizes in bytes | +| mempool_failed_txs | counter | | number of failed transactions | +| mempool_recheck_times | counter | | number of transactions rechecked in the mempool | +| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms | + +## Useful queries + +Percentage of missing + byzantine validators: + +```md +((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100 +``` diff --git a/docs/tendermint-core/sentry_layout.png b/docs/nodes/sentry_layout.png similarity index 100% rename from docs/tendermint-core/sentry_layout.png rename to docs/nodes/sentry_layout.png diff --git a/docs/nodes/state-sync.md b/docs/nodes/state-sync.md new file mode 100644 index 0000000000..9ead794594 --- /dev/null +++ b/docs/nodes/state-sync.md @@ -0,0 +1,42 @@ +--- +order: 5 +--- + +# Configure State-Sync + +State sync will continuously work in the background to supply nodes with chunked data when bootstrapping. + +> NOTE: Before trying to use state sync, see if the application you are operating a node for supports it. + +Under the state sync section in `config.toml` you will find multiple settings that need to be configured in order for your node to use state sync. + +Lets breakdown the settings: + +- `enable`: Enable is to inform the node that you will be using state sync to bootstrap your node. 
+- `rpc_servers`: RPC servers are needed because state sync utilizes the light client for verification. + - 2 servers are required, more is always helpful. +- `temp_dir`: Temporary directory is used to store the chunks in the machine's local storage. If nothing is set it will create a directory in `/tmp` + +The next pieces of information you will need to acquire through publicly exposed RPCs or a block explorer which you trust. + +- `trust_height`: Trusted height defines at which height your node should trust the chain. +- `trust_hash`: Trusted hash is the hash in the `BlockID` corresponding to the trusted height. +- `trust_period`: Trust period is the period in which headers can be verified. + > :warning: This value should be significantly smaller than the unbonding period. + +If you are relying on publicly exposed RPCs to get the needed information, you can use `curl`. + +Example: + +```bash +curl -s https://233.123.0.140:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" +``` + +The response will be: + +```json +{ + "height": "273", + "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" +} +``` diff --git a/docs/nodes/validators.md b/docs/nodes/validators.md new file mode 100644 index 0000000000..cccc1f7550 --- /dev/null +++ b/docs/nodes/validators.md @@ -0,0 +1,114 @@ +--- +order: 2 +--- + +# Validators + +Validators are responsible for committing new blocks in the blockchain. +These validators participate in the consensus protocol by broadcasting +_votes_ which contain cryptographic signatures signed by each +validator's private key. + +Some Proof-of-Stake consensus algorithms aim to create a "completely" +decentralized system where all stakeholders (even those who are not +always available online) participate in the committing of blocks. +Tendermint has a different approach to block creation.
Validators are +expected to be online, and the set of validators is permissioned/curated +by some external process. Proof-of-stake is not required, but can be +implemented on top of Tendermint consensus. That is, validators may be +required to post collateral on-chain, off-chain, or may not be required +to post any collateral at all. + +Validators have a cryptographic key-pair and an associated amount of +"voting power". Voting power need not be the same. + +## Becoming a Validator + +There are two ways to become a validator. + +1. They can be pre-established in the [genesis state](./using-tendermint.md#genesis) +2. The ABCI app responds to the EndBlock message with changes to the + existing validator set. + +## Setting up a Validator + +When setting up a validator there are countless ways to configure your setup. This guide is aimed at showing one of them, the sentry node design. This design is mainly for DDOS prevention. + +### Network Layout + +![ALT Network Layout](./sentry_layout.png) + +The diagram is based on AWS; other cloud providers will have similar solutions. Running nodes is not limited to cloud providers, you can run nodes on bare metal systems as well. The architecture will be the same no matter which setup you decide to go with. + +The proposed network diagram is similar to the classical backend/frontend separation of services in a corporate environment. The “backend” in this case is the private network of the validator in the data center. The data center network might involve multiple subnets, firewalls and redundancy devices, which is not detailed on this diagram. The important point is that the data center allows direct connectivity to the chosen cloud environment. Amazon AWS has “Direct Connect”, while Google Cloud has “Partner Interconnect”. This is a dedicated connection to the cloud provider (usually directly to your virtual private cloud instance in one of the regions).
+ +All sentry nodes (the “frontend”) connect to the validator using this private connection. The validator does not have a public IP address to provide its services. + +Amazon has multiple availability zones within a region. One can install sentry nodes in other regions too. In this case the second, third and further regions need to have a private connection to the validator node. This can be achieved by VPC Peering (“VPC Network Peering” in Google Cloud). In this case, the second, third and further region sentry nodes will be directed to the first region and through the direct connect to the data center, arriving at the validator. + +A more persistent solution (not detailed on the diagram) is to have multiple direct connections to different regions from the data center. This way VPC Peering is not mandatory, although still beneficial for the sentry nodes. This overcomes the risk of depending on one region. It is more costly. + +### Local Configuration + +![ALT Local Configuration](./local_config.png) + +The validator will only talk to the sentry nodes that are provided; the sentry nodes will communicate with the validator via a secret connection and with the rest of the network through a normal connection. The sentry nodes do have the option of communicating with each other as well. + +When initializing nodes there are six parameters in the `config.toml` that may need to be altered. + +- `pex:` boolean. This turns the peer exchange reactor on or off for a node. When `pex=false`, only the `persistent_peers` list is available for connection. +- `persistent_peers:` a comma separated list of `nodeID@ip:port` values that define a list of peers that are expected to be online at all times. This is necessary at first startup because by setting `pex=false` the node will not be able to join the network. +- `unconditional_peer_ids:` comma separated list of nodeID's. These nodes will be connected to no matter the limits of inbound and outbound peers. 
This is useful for when sentry nodes have full address books. +- `private_peer_ids:` comma separated list of nodeID's. These nodes will not be gossiped to the network. This is an important field as you do not want your validator IP gossiped to the network. +- `addr_book_strict:` boolean. By default nodes with a routable address will be considered for connection. If this setting is turned off (false), non-routable IP addresses, like addresses in a private network, can be added to the address book. +- `double_sign_check_height` int64 height. How many blocks to look back to check existence of the node's consensus votes before joining consensus. When non-zero, the node will panic upon restart if the same consensus key was used to sign {double_sign_check_height} last blocks. So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. + +#### Validator Node Configuration + +| Config Option | Setting | +| ------------------------ | -------------------------- | +| pex | false | +| persistent_peers | list of sentry nodes | +| private_peer_ids | none | +| unconditional_peer_ids | optionally sentry node IDs | +| addr_book_strict | false | +| double_sign_check_height | 10 | + +The validator node should have `pex=false` so it does not gossip to the entire network. The persistent peers will be your sentry nodes. Private peers can be left empty as the validator is not trying to hide who it is communicating with. Setting unconditional peers is optional for a validator because they will not have a full address book. 
+ +#### Sentry Node Configuration + +| Config Option | Setting | +| ---------------------- | --------------------------------------------- | +| pex | true | +| persistent_peers | validator node, optionally other sentry nodes | +| private_peer_ids | validator node ID | +| unconditional_peer_ids | validator node ID, optionally sentry node IDs | +| addr_book_strict | false | + +The sentry nodes should be able to talk to the entire network, which is why `pex=true`. The persistent peers of a sentry node will be the validator, and optionally other sentry nodes. The sentry nodes should make sure that they do not gossip the validator's IP; to do this you must put the validator's node ID as a private peer. The unconditional peer IDs will be the validator ID and optionally other sentry nodes. + +> Note: Do not forget to secure your node's firewalls when setting them up. + +More Information can be found at these links: + +- +- + +### Validator keys + +Protecting a validator's consensus key is the most important factor to take into account when designing your setup. The key that a validator is given upon creation of the node is called a consensus key; it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io/) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms), it is used extensively in production. You are not limited to using this tool, there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), there is not a recommended HSM. + +Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are widely supported across the security sector and HSMs. 
+ +## Committing a Block + +> **+2/3 is short for "more than 2/3"** + +A block is committed when +2/3 of the validator set sign [precommit +votes](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#vote) for that block at the same `round`. +The +2/3 set of precommit votes is called a +[_commit_](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#commit). While any +2/3 set of +precommits for the same block at the same height&round can serve as +validation, the canonical commit is included in the next block (see +[LastCommit](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#lastcommit)). diff --git a/docs/package-lock.json b/docs/package-lock.json index 86d518781a..55de59381c 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -5,118 +5,118 @@ "requires": true, "dependencies": { "@algolia/cache-browser-local-storage": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.5.1.tgz", - "integrity": "sha512-TAQHRHaCUAR0bNhUHG0CnO6FTx3EMPwZQrjPuNS6kHvCQ/H8dVD0sLsHyM8C7U4j33xPQCWi9TBnSx8cYXNmNw==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.8.3.tgz", + "integrity": "sha512-Cwc03hikHSUI+xvgUdN+H+f6jFyoDsC9fegzXzJ2nPn1YSN9EXzDMBnbrgl0sbl9iLGXe0EIGMYqR2giCv1wMQ==", "requires": { - "@algolia/cache-common": "4.5.1" + "@algolia/cache-common": "4.8.3" } }, "@algolia/cache-common": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.5.1.tgz", - "integrity": "sha512-Sux+pcedQi9sfScIiQdl6pEaTVl712qM9OblvDhnaeF1v6lf4jyTlRTiBLP7YBLuvO1Yo54W3maf03kmz9PVhA==" + "version": "4.8.3", + "resolved": 
"https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.8.3.tgz", + "integrity": "sha512-Cf7zZ2i6H+tLSBTkFePHhYvlgc9fnMPKsF9qTmiU38kFIGORy/TN2Fx5n1GBuRLIzaSXvcf+oHv1HvU0u1gE1g==" }, "@algolia/cache-in-memory": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.5.1.tgz", - "integrity": "sha512-fzwAtBFwveuG+E5T/namChEIvdVl0DoV3djV1C078b/JpO5+DeAwuXIJGYbyl950u170n5NEYuIwYG+R6h4lJQ==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.8.3.tgz", + "integrity": "sha512-+N7tkvmijXiDy2E7u1mM73AGEgGPWFmEmPeJS96oT46I98KXAwVPNYbcAqBE79YlixdXpkYJk41cFcORzNh+Iw==", "requires": { - "@algolia/cache-common": "4.5.1" + "@algolia/cache-common": "4.8.3" } }, "@algolia/client-account": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.5.1.tgz", - "integrity": "sha512-2WFEaI7Zf4ljnBsSAS4e+YylZ5glovm78xFg4E1JKA8PE6M+TeIgUY6HO2ouLh2dqQKxc9UfdAT1Loo/dha2iQ==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.8.3.tgz", + "integrity": "sha512-Uku8LqnXBwfDCtsTCDYTUOz2/2oqcAQCKgaO0uGdIR8DTQENBXFQvzziambHdn9KuFuY+6Et9k1+cjpTPBDTBg==", "requires": { - "@algolia/client-common": "4.5.1", - "@algolia/client-search": "4.5.1", - "@algolia/transporter": "4.5.1" + "@algolia/client-common": "4.8.3", + "@algolia/client-search": "4.8.3", + "@algolia/transporter": "4.8.3" } }, "@algolia/client-analytics": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.5.1.tgz", - "integrity": "sha512-bTmZUU8zhZMWBeGEQ/TVqLoL3OOT0benU0HtS3iOnQURwb+AOCv3RsgZvkj2djp+M24Q6P8/L34uBJMmCurbLg==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.8.3.tgz", + "integrity": 
"sha512-9ensIWmjYJprZ+YjAVSZdWUG05xEnbytENXp508X59tf34IMIX8BR2xl0RjAQODtxBdAteGxuKt5THX6U9tQLA==", "requires": { - "@algolia/client-common": "4.5.1", - "@algolia/client-search": "4.5.1", - "@algolia/requester-common": "4.5.1", - "@algolia/transporter": "4.5.1" + "@algolia/client-common": "4.8.3", + "@algolia/client-search": "4.8.3", + "@algolia/requester-common": "4.8.3", + "@algolia/transporter": "4.8.3" } }, "@algolia/client-common": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.5.1.tgz", - "integrity": "sha512-5CpIf8IK1hke7q+N4e+A4TWdFXVJ5Qwyaa0xS84DrDO8HQ7vfYbDvG1oYa9hVEtGn6c3WVKPAvuWynK+fXQQCA==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.8.3.tgz", + "integrity": "sha512-TU3623AEFAWUQlDTznkgAMSYo8lfS9pNs5QYDQzkvzWdqK0GBDWthwdRfo9iIsfxiR9qdCMHqwEu+AlZMVhNSA==", "requires": { - "@algolia/requester-common": "4.5.1", - "@algolia/transporter": "4.5.1" + "@algolia/requester-common": "4.8.3", + "@algolia/transporter": "4.8.3" } }, "@algolia/client-recommendation": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.5.1.tgz", - "integrity": "sha512-GiFrNSImoEBUQICjFBEoxPGzrjWji8PY9GeMg2CNvOYcRQ0Xt0Y36v9GN53NLjvB7QdQ2FlE1Cuv/PLUfS/aQQ==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.8.3.tgz", + "integrity": "sha512-qysGbmkcc6Agt29E38KWJq9JuxjGsyEYoKuX9K+P5HyQh08yR/BlRYrA8mB7vT/OIUHRGFToGO6Vq/rcg0NIOQ==", "requires": { - "@algolia/client-common": "4.5.1", - "@algolia/requester-common": "4.5.1", - "@algolia/transporter": "4.5.1" + "@algolia/client-common": "4.8.3", + "@algolia/requester-common": "4.8.3", + "@algolia/transporter": "4.8.3" } }, "@algolia/client-search": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.5.1.tgz", - "integrity": 
"sha512-wjuOTte9Auo9Cg4fL0709PjeJ9rXFh4okYUrOt/2SWqQid6DSdZOp+BtyaHKV3E94sj+SlmMxkMUacYluYg5zA==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.8.3.tgz", + "integrity": "sha512-rAnvoy3GAhbzOQVniFcKVn1eM2NX77LearzYNCbtFrFYavG+hJI187bNVmajToiuGZ10FfJvK99X2OB1AzzezQ==", "requires": { - "@algolia/client-common": "4.5.1", - "@algolia/requester-common": "4.5.1", - "@algolia/transporter": "4.5.1" + "@algolia/client-common": "4.8.3", + "@algolia/requester-common": "4.8.3", + "@algolia/transporter": "4.8.3" } }, "@algolia/logger-common": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.5.1.tgz", - "integrity": "sha512-ZoVnGriinlLHlkvn5K7djOUn1/1IeTjU8rDzOJ3t06T+2hQytgJghaX7rSwKIeH4CjWMy61w8jLisuGJRBOEeg==" + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.8.3.tgz", + "integrity": "sha512-03wksHRbhl2DouEKnqWuUb64s1lV6kDAAabMCQ2Du1fb8X/WhDmxHC4UXMzypeOGlH5BZBsgVwSB7vsZLP3MZg==" }, "@algolia/logger-console": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.5.1.tgz", - "integrity": "sha512-1qa7K18+uAgxyWuguayaDS5ViiZFcOjI3J5ACBb0i/n7RsXUo149lP6mwmx6TIU7s135hT0f0TCqnvfMvN1ilA==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.8.3.tgz", + "integrity": "sha512-Npt+hI4UF8t3TLMluL5utr9Gc11BjL5kDnGZOhDOAz5jYiSO2nrHMFmnpLT4Cy/u7a5t7EB5dlypuC4/AGStkA==", "requires": { - "@algolia/logger-common": "4.5.1" + "@algolia/logger-common": "4.8.3" } }, "@algolia/requester-browser-xhr": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.5.1.tgz", - "integrity": "sha512-tsQz+9pZw9dwPm/wMvZDpsWFZgmghLjXi4c3O4rfwoP/Ikum5fhle5fiR14yb4Lw4WlOQ1AJIHJvrg1qLIG8hQ==", + "version": "4.8.3", + "resolved": 
"https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.8.3.tgz", + "integrity": "sha512-/LTTIpgEmEwkyhn8yXxDdBWqXqzlgw5w2PtTpIwkSlP2/jDwdR/9w1TkFzhNbJ81ki6LAEQM5mSwoTTnbIIecg==", "requires": { - "@algolia/requester-common": "4.5.1" + "@algolia/requester-common": "4.8.3" } }, "@algolia/requester-common": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.5.1.tgz", - "integrity": "sha512-bPCiLvhHKXaka7f5FLtheChToz0yHVhvza64naFJRRh/3kC0nvyrvQ0ogjiydiSrGIfdNDyyTVfKGdk4gS5gyA==" + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.8.3.tgz", + "integrity": "sha512-+Yo9vBkofoKR1SCqqtMnmnfq9yt/BiaDewY/6bYSMNxSYCnu2Fw1JKSIaf/4zos09PMSsxGpLohZwGas3+0GDQ==" }, "@algolia/requester-node-http": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.5.1.tgz", - "integrity": "sha512-BfFc2h9eQOKu1gGs3DtQO7GrVZW/rxUgpJVLja4UVQyGplJyTCrFgkTyfl+8rb3MkNgA/S2LNo7cKNSPfpqeAQ==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.8.3.tgz", + "integrity": "sha512-k2fiKIeMIFqgC01FnzII6kqC2GQBAfbNaUX4k7QCPa6P8t4sp2xE6fImOUiztLnnL3C9X9ZX6Fw3L+cudi7jvQ==", "requires": { - "@algolia/requester-common": "4.5.1" + "@algolia/requester-common": "4.8.3" } }, "@algolia/transporter": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.5.1.tgz", - "integrity": "sha512-asPDNToDAPhH0tM6qKGTn1l0wTlNUbekpa1ifZ6v+qhSjo3VdqGyp+2VeciJOBW/wVHXh3HUbAcycvLERRlCLg==", + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.8.3.tgz", + "integrity": "sha512-nU7fy2iU8snxATlsks0MjMyv97QJWQmOVwTjDc+KZ4+nue8CLcgm4LA4dsTBqvxeCQIoEtt3n72GwXcaqiJSjQ==", "requires": { - "@algolia/cache-common": "4.5.1", - "@algolia/logger-common": "4.5.1", - 
"@algolia/requester-common": "4.5.1" + "@algolia/cache-common": "4.8.3", + "@algolia/logger-common": "4.8.3", + "@algolia/requester-common": "4.8.3" } }, "@babel/code-frame": { @@ -128,49 +128,36 @@ } }, "@babel/compat-data": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.11.0.tgz", - "integrity": "sha512-TPSvJfv73ng0pfnEOh17bYMPQbI95+nGWc71Ss4vZdRBHTDqmM9Z8ZV4rYz8Ks7sfzc95n30k6ODIq5UGnXcYQ==", - "requires": { - "browserslist": "^4.12.0", - "invariant": "^2.2.4", - "semver": "^5.5.0" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } - } + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.12.7.tgz", + "integrity": "sha512-YaxPMGs/XIWtYqrdEOZOCPsVWfEoriXopnsz3/i7apYPXQ3698UFhS6dVT1KN5qOsWmVgw/FOrmQgpRaZayGsw==" }, "@babel/core": { - "version": "7.11.6", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.11.6.tgz", - "integrity": "sha512-Wpcv03AGnmkgm6uS6k8iwhIwTrcP0m17TL1n1sy7qD0qelDu4XNeW0dN0mHfa+Gei211yDaLoEe/VlbXQzM4Bg==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.10.tgz", + "integrity": "sha512-eTAlQKq65zHfkHZV0sIVODCPGVgoo1HdBlbSLi9CqOzuZanMv2ihzY+4paiKr1mH+XmYESMAmJ/dpZ68eN6d8w==", "requires": { "@babel/code-frame": "^7.10.4", - "@babel/generator": "^7.11.6", - "@babel/helper-module-transforms": "^7.11.0", - "@babel/helpers": "^7.10.4", - "@babel/parser": "^7.11.5", - "@babel/template": "^7.10.4", - "@babel/traverse": "^7.11.5", - "@babel/types": "^7.11.5", + "@babel/generator": "^7.12.10", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.5", + "@babel/parser": "^7.12.10", + "@babel/template": "^7.12.7", + "@babel/traverse": "^7.12.10", + "@babel/types": "^7.12.10", 
"convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.1", "json5": "^2.1.2", "lodash": "^4.17.19", - "resolve": "^1.3.2", "semver": "^5.4.1", "source-map": "^0.5.0" }, "dependencies": { "debug": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", - "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { "ms": "2.1.2" } @@ -201,11 +188,11 @@ } }, "@babel/generator": { - "version": "7.11.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.11.6.tgz", - "integrity": "sha512-DWtQ1PV3r+cLbySoHrwn9RWEgKMBLLma4OBQloPRyDYvc5msJM9kvTLo1YnlJd1P/ZuKbdli3ijr5q3FvAF3uA==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.12.10.tgz", + "integrity": "sha512-6mCdfhWgmqLdtTkhXjnIz0LcdVCd26wS2JXRtj2XY0u5klDsXBREA/pG5NVOuVnF2LUrBGNFtQkIqqTbblg0ww==", "requires": { - "@babel/types": "^7.11.5", + "@babel/types": "^7.12.10", "jsesc": "^2.5.1", "source-map": "^0.5.0" }, @@ -218,11 +205,11 @@ } }, "@babel/helper-annotate-as-pure": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.10.4.tgz", - "integrity": "sha512-XQlqKQP4vXFB7BN8fEEerrmYvHp3fK/rBkRFz9jaJbzK0B1DSfej9Kc7ZzE8Z/OnId1jpJdNAZ3BFQjWG68rcA==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.10.tgz", + "integrity": "sha512-XplmVbC1n+KY6jL8/fgLVXXUauDIB+lD5+GsQEh6F6GBF1dq1qy4DP4yXWzDKcoqXB3X58t61e85Fitoww4JVQ==", "requires": { - "@babel/types": "^7.10.4" + "@babel/types": "^7.12.10" } }, "@babel/helper-builder-binary-assignment-operator-visitor": { @@ -235,14 +222,13 @@ } }, 
"@babel/helper-compilation-targets": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.10.4.tgz", - "integrity": "sha512-a3rYhlsGV0UHNDvrtOXBg8/OpfV0OKTkxKPzIplS1zpx7CygDcWWxckxZeDd3gzPzC4kUT0A4nVFDK0wGMh4MQ==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.12.5.tgz", + "integrity": "sha512-+qH6NrscMolUlzOYngSBMIOQpKUGPPsc61Bu5W10mg84LxZ7cmvnBHzARKbDoFxVvqqAbj6Tg6N7bSrWSPXMyw==", "requires": { - "@babel/compat-data": "^7.10.4", - "browserslist": "^4.12.0", - "invariant": "^2.2.4", - "levenary": "^1.1.1", + "@babel/compat-data": "^7.12.5", + "@babel/helper-validator-option": "^7.12.1", + "browserslist": "^4.14.5", "semver": "^5.5.0" }, "dependencies": { @@ -254,26 +240,24 @@ } }, "@babel/helper-create-class-features-plugin": { - "version": "7.10.5", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.10.5.tgz", - "integrity": "sha512-0nkdeijB7VlZoLT3r/mY3bUkw3T8WG/hNw+FATs/6+pG2039IJWjTYL0VTISqsNHMUTEnwbVnc89WIJX9Qed0A==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.12.1.tgz", + "integrity": "sha512-hkL++rWeta/OVOBTRJc9a5Azh5mt5WgZUGAKMD8JM141YsE08K//bp1unBBieO6rUKkIPyUE0USQ30jAy3Sk1w==", "requires": { "@babel/helper-function-name": "^7.10.4", - "@babel/helper-member-expression-to-functions": "^7.10.5", + "@babel/helper-member-expression-to-functions": "^7.12.1", "@babel/helper-optimise-call-expression": "^7.10.4", - "@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-replace-supers": "^7.10.4", + "@babel/helper-replace-supers": "^7.12.1", "@babel/helper-split-export-declaration": "^7.10.4" } }, "@babel/helper-create-regexp-features-plugin": { - "version": "7.10.4", - "resolved": 
"https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.10.4.tgz", - "integrity": "sha512-2/hu58IEPKeoLF45DBwx3XFqsbCXmkdAay4spVr2x0jYgRxrSNp+ePwvSsy9g6YSaNDcKIQVPXk1Ov8S2edk2g==", + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.7.tgz", + "integrity": "sha512-idnutvQPdpbduutvi3JVfEgcVIHooQnhvhx0Nk9isOINOIGYkZea1Pk2JlJRiUnMefrlvr0vkByATBY/mB4vjQ==", "requires": { "@babel/helper-annotate-as-pure": "^7.10.4", - "@babel/helper-regex": "^7.10.4", - "regexpu-core": "^4.7.0" + "regexpu-core": "^4.7.1" } }, "@babel/helper-define-map": { @@ -287,11 +271,11 @@ } }, "@babel/helper-explode-assignable-expression": { - "version": "7.11.4", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.11.4.tgz", - "integrity": "sha512-ux9hm3zR4WV1Y3xXxXkdG/0gxF9nvI0YVmKVhvK9AfMoaQkemL3sJpXw+Xbz65azo8qJiEz2XVDUpK3KYhH3ZQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.12.1.tgz", + "integrity": "sha512-dmUwH8XmlrUpVqgtZ737tK88v07l840z9j3OEhCLwKTkjlvKpfqXVIZ0wpK3aeOxspwGrf/5AP5qLx4rO3w5rA==", "requires": { - "@babel/types": "^7.10.4" + "@babel/types": "^7.12.1" } }, "@babel/helper-function-name": { @@ -305,11 +289,11 @@ } }, "@babel/helper-get-function-arity": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.4.tgz", - "integrity": "sha512-EkN3YDB+SRDgiIUnNgcmiD361ti+AVbL3f3Henf6dqqUyr5dMsorno0lJWJuLhDhkI5sYEpgj6y9kB8AOU1I2A==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.10.tgz", + "integrity": 
"sha512-mm0n5BPjR06wh9mPQaDdXWDoll/j5UpCAPl1x8fS71GHm7HA6Ua2V4ylG1Ju8lvcTOietbPNNPaSilKj+pj+Ag==", "requires": { - "@babel/types": "^7.10.4" + "@babel/types": "^7.12.10" } }, "@babel/helper-hoist-variables": { @@ -321,41 +305,43 @@ } }, "@babel/helper-member-expression-to-functions": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.11.0.tgz", - "integrity": "sha512-JbFlKHFntRV5qKw3YC0CvQnDZ4XMwgzzBbld7Ly4Mj4cbFy3KywcR8NtNctRToMWJOVvLINJv525Gd6wwVEx/Q==", + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.7.tgz", + "integrity": "sha512-DCsuPyeWxeHgh1Dus7APn7iza42i/qXqiFPWyBDdOFtvS581JQePsc1F/nD+fHrcswhLlRc2UpYS1NwERxZhHw==", "requires": { - "@babel/types": "^7.11.0" + "@babel/types": "^7.12.7" } }, "@babel/helper-module-imports": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.10.4.tgz", - "integrity": "sha512-nEQJHqYavI217oD9+s5MUBzk6x1IlvoS9WTPfgG43CbMEeStE0v+r+TucWdx8KFGowPGvyOkDT9+7DHedIDnVw==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.12.5.tgz", + "integrity": "sha512-SR713Ogqg6++uexFRORf/+nPXMmWIn80TALu0uaFb+iQIUoR7bOC7zBWyzBs5b3tBBJXuyD0cRu1F15GyzjOWA==", "requires": { - "@babel/types": "^7.10.4" + "@babel/types": "^7.12.5" } }, "@babel/helper-module-transforms": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.11.0.tgz", - "integrity": "sha512-02EVu8COMuTRO1TAzdMtpBPbe6aQ1w/8fePD2YgQmxZU4gpNWaL9gK3Jp7dxlkUlUCJOTaSeA+Hrm1BRQwqIhg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.12.1.tgz", + "integrity": 
"sha512-QQzehgFAZ2bbISiCpmVGfiGux8YVFXQ0abBic2Envhej22DVXV9nCFaS5hIQbkyo1AdGb+gNME2TSh3hYJVV/w==", "requires": { - "@babel/helper-module-imports": "^7.10.4", - "@babel/helper-replace-supers": "^7.10.4", - "@babel/helper-simple-access": "^7.10.4", + "@babel/helper-module-imports": "^7.12.1", + "@babel/helper-replace-supers": "^7.12.1", + "@babel/helper-simple-access": "^7.12.1", "@babel/helper-split-export-declaration": "^7.11.0", + "@babel/helper-validator-identifier": "^7.10.4", "@babel/template": "^7.10.4", - "@babel/types": "^7.11.0", + "@babel/traverse": "^7.12.1", + "@babel/types": "^7.12.1", "lodash": "^4.17.19" } }, "@babel/helper-optimise-call-expression": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.4.tgz", - "integrity": "sha512-n3UGKY4VXwXThEiKrgRAoVPBMqeoPgHVqiHZOanAJCG9nQUL2pLRQirUzl0ioKclHGpGqRgIOkgcIJaIWLpygg==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.10.tgz", + "integrity": "sha512-4tpbU0SrSTjjt65UMWSrUOPZTsgvPgGG4S8QSTNHacKzpS51IVWGDj0yCwyeZND/i+LSN2g/O63jEXEWm49sYQ==", "requires": { - "@babel/types": "^7.10.4" + "@babel/types": "^7.12.10" } }, "@babel/helper-plugin-utils": { @@ -363,51 +349,41 @@ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" }, - "@babel/helper-regex": { - "version": "7.10.5", - "resolved": "https://registry.npmjs.org/@babel/helper-regex/-/helper-regex-7.10.5.tgz", - "integrity": "sha512-68kdUAzDrljqBrio7DYAEgCoJHxppJOERHOgOrDN7WjOzP0ZQ1LsSDRXcemzVZaLvjaJsJEESb6qt+znNuENDg==", - "requires": { - "lodash": "^4.17.19" - } - }, "@babel/helper-remap-async-to-generator": { - "version": "7.11.4", - "resolved": 
"https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.11.4.tgz", - "integrity": "sha512-tR5vJ/vBa9wFy3m5LLv2faapJLnDFxNWff2SAYkSE4rLUdbp7CdObYFgI7wK4T/Mj4UzpjPwzR8Pzmr5m7MHGA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.12.1.tgz", + "integrity": "sha512-9d0KQCRM8clMPcDwo8SevNs+/9a8yWVVmaE80FGJcEP8N1qToREmWEGnBn8BUlJhYRFz6fqxeRL1sl5Ogsed7A==", "requires": { "@babel/helper-annotate-as-pure": "^7.10.4", "@babel/helper-wrap-function": "^7.10.4", - "@babel/template": "^7.10.4", - "@babel/types": "^7.10.4" + "@babel/types": "^7.12.1" } }, "@babel/helper-replace-supers": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.10.4.tgz", - "integrity": "sha512-sPxZfFXocEymYTdVK1UNmFPBN+Hv5mJkLPsYWwGBxZAxaWfFu+xqp7b6qWD0yjNuNL2VKc6L5M18tOXUP7NU0A==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.12.5.tgz", + "integrity": "sha512-5YILoed0ZyIpF4gKcpZitEnXEJ9UoDRki1Ey6xz46rxOzfNMAhVIJMoune1hmPVxh40LRv1+oafz7UsWX+vyWA==", "requires": { - "@babel/helper-member-expression-to-functions": "^7.10.4", + "@babel/helper-member-expression-to-functions": "^7.12.1", "@babel/helper-optimise-call-expression": "^7.10.4", - "@babel/traverse": "^7.10.4", - "@babel/types": "^7.10.4" + "@babel/traverse": "^7.12.5", + "@babel/types": "^7.12.5" } }, "@babel/helper-simple-access": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.10.4.tgz", - "integrity": "sha512-0fMy72ej/VEvF8ULmX6yb5MtHG4uH4Dbd6I/aHDb/JVg0bbivwt9Wg+h3uMvX+QSFtwr5MeItvazbrc4jtRAXw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.12.1.tgz", + "integrity": 
"sha512-OxBp7pMrjVewSSC8fXDFrHrBcJATOOFssZwv16F3/6Xtc138GHybBfPbm9kfiqQHKhYQrlamWILwlDCeyMFEaA==", "requires": { - "@babel/template": "^7.10.4", - "@babel/types": "^7.10.4" + "@babel/types": "^7.12.1" } }, "@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.11.0.tgz", - "integrity": "sha512-0XIdiQln4Elglgjbwo9wuJpL/K7AGCY26kmEt0+pRP0TAj4jjyNq1MjoRvikrTVqKcx4Gysxt4cXvVFXP/JO2Q==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz", + "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==", "requires": { - "@babel/types": "^7.11.0" + "@babel/types": "^7.12.1" } }, "@babel/helper-split-export-declaration": { @@ -423,10 +399,15 @@ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz", "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==" }, + "@babel/helper-validator-option": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.1.tgz", + "integrity": "sha512-YpJabsXlJVWP0USHjnC/AQDTLlZERbON577YUVO/wLpqyj6HAtVYnWaQaN0iUN+1/tWn3c+uKKXjRut5115Y2A==" + }, "@babel/helper-wrap-function": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.10.4.tgz", - "integrity": "sha512-6py45WvEF0MhiLrdxtRjKjufwLL1/ob2qDJgg5JgNdojBAZSAKnAjkyOCNug6n+OBl4VW76XjvgSFTdaMcW0Ug==", + "version": "7.12.3", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.12.3.tgz", + "integrity": 
"sha512-Cvb8IuJDln3rs6tzjW3Y8UeelAOdnpB8xtQ4sme2MSZ9wOxrbThporC0y/EtE16VAtoyEfLM404Xr1e0OOp+ow==", "requires": { "@babel/helper-function-name": "^7.10.4", "@babel/template": "^7.10.4", @@ -435,13 +416,13 @@ } }, "@babel/helpers": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.10.4.tgz", - "integrity": "sha512-L2gX/XeUONeEbI78dXSrJzGdz4GQ+ZTA/aazfUsFaWjSe95kiCuOZ5HsXvkiw3iwF+mFHSRUfJU8t6YavocdXA==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.12.5.tgz", + "integrity": "sha512-lgKGMQlKqA8meJqKsW6rUnc4MdUk35Ln0ATDqdM1a/UpARODdI4j5Y5lVfUScnSNkJcdCRAaWkspykNoFg9sJA==", "requires": { "@babel/template": "^7.10.4", - "@babel/traverse": "^7.10.4", - "@babel/types": "^7.10.4" + "@babel/traverse": "^7.12.5", + "@babel/types": "^7.12.5" } }, "@babel/highlight": { @@ -455,137 +436,137 @@ } }, "@babel/parser": { - "version": "7.11.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.11.5.tgz", - "integrity": "sha512-X9rD8qqm695vgmeaQ4fvz/o3+Wk4ZzQvSHkDBgpYKxpD4qTAUm88ZKtHkVqIOsYFFbIQ6wQYhC6q7pjqVK0E0Q==" + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.12.10.tgz", + "integrity": "sha512-PJdRPwyoOqFAWfLytxrWwGrAxghCgh/yTNCYciOz8QgjflA7aZhECPZAa2VUedKg2+QMWkI0L9lynh2SNmNEgA==" }, "@babel/plugin-proposal-async-generator-functions": { - "version": "7.10.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.10.5.tgz", - "integrity": "sha512-cNMCVezQbrRGvXJwm9fu/1sJj9bHdGAgKodZdLqOQIpfoH3raqmRPBM17+lh7CzhiKRRBrGtZL9WcjxSoGYUSg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.12.1.tgz", + "integrity": "sha512-d+/o30tJxFxrA1lhzJqiUcEJdI6jKlNregCv5bASeGf2Q4MXmnwH7viDo7nhx1/ohf09oaH8j1GVYG/e3Yqk6A==", "requires": { 
"@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-remap-async-to-generator": "^7.10.4", + "@babel/helper-remap-async-to-generator": "^7.12.1", "@babel/plugin-syntax-async-generators": "^7.8.0" } }, "@babel/plugin-proposal-class-properties": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.10.4.tgz", - "integrity": "sha512-vhwkEROxzcHGNu2mzUC0OFFNXdZ4M23ib8aRRcJSsW8BZK9pQMD7QB7csl97NBbgGZO7ZyHUyKDnxzOaP4IrCg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.12.1.tgz", + "integrity": "sha512-cKp3dlQsFsEs5CWKnN7BnSHOd0EOW8EKpEjkoz1pO2E5KzIDNV9Ros1b0CnmbVgAGXJubOYVBOGCT1OmJwOI7w==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.10.4", + "@babel/helper-create-class-features-plugin": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-proposal-decorators": { - "version": "7.10.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.10.5.tgz", - "integrity": "sha512-Sc5TAQSZuLzgY0664mMDn24Vw2P8g/VhyLyGPaWiHahhgLqeZvcGeyBZOrJW0oSKIK2mvQ22a1ENXBIQLhrEiQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.12.1.tgz", + "integrity": "sha512-knNIuusychgYN8fGJHONL0RbFxLGawhXOJNLBk75TniTsZZeA+wdkDuv6wp4lGwzQEKjZi6/WYtnb3udNPmQmQ==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.10.5", + "@babel/helper-create-class-features-plugin": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4", - "@babel/plugin-syntax-decorators": "^7.10.4" + "@babel/plugin-syntax-decorators": "^7.12.1" } }, "@babel/plugin-proposal-dynamic-import": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.10.4.tgz", - "integrity": 
"sha512-up6oID1LeidOOASNXgv/CFbgBqTuKJ0cJjz6An5tWD+NVBNlp3VNSBxv2ZdU7SYl3NxJC7agAQDApZusV6uFwQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.12.1.tgz", + "integrity": "sha512-a4rhUSZFuq5W8/OO8H7BL5zspjnc1FLd9hlOxIK/f7qG4a0qsqk8uvF/ywgBA8/OmjsapjpvaEOYItfGG1qIvQ==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-dynamic-import": "^7.8.0" } }, "@babel/plugin-proposal-export-namespace-from": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.10.4.tgz", - "integrity": "sha512-aNdf0LY6/3WXkhh0Fdb6Zk9j1NMD8ovj3F6r0+3j837Pn1S1PdNtcwJ5EG9WkVPNHPxyJDaxMaAOVq4eki0qbg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.1.tgz", + "integrity": "sha512-6CThGf0irEkzujYS5LQcjBx8j/4aQGiVv7J9+2f7pGfxqyKh3WnmVJYW3hdrQjyksErMGBPQrCnHfOtna+WLbw==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-export-namespace-from": "^7.8.3" } }, "@babel/plugin-proposal-json-strings": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.10.4.tgz", - "integrity": "sha512-fCL7QF0Jo83uy1K0P2YXrfX11tj3lkpN7l4dMv9Y9VkowkhkQDwFHFd8IiwyK5MZjE8UpbgokkgtcReH88Abaw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.12.1.tgz", + "integrity": "sha512-GoLDUi6U9ZLzlSda2Df++VSqDJg3CG+dR0+iWsv6XRw1rEq+zwt4DirM9yrxW6XWaTpmai1cWJLMfM8qQJf+yw==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-json-strings": "^7.8.0" } }, "@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.11.0", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.11.0.tgz", - "integrity": "sha512-/f8p4z+Auz0Uaf+i8Ekf1iM7wUNLcViFUGiPxKeXvxTSl63B875YPiVdUDdem7hREcI0E0kSpEhS8tF5RphK7Q==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.12.1.tgz", + "integrity": "sha512-k8ZmVv0JU+4gcUGeCDZOGd0lCIamU/sMtIiX3UWnUc5yzgq6YUGyEolNYD+MLYKfSzgECPcqetVcJP9Afe/aCA==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" } }, "@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.10.4.tgz", - "integrity": "sha512-wq5n1M3ZUlHl9sqT2ok1T2/MTt6AXE0e1Lz4WzWBr95LsAZ5qDXe4KnFuauYyEyLiohvXFMdbsOTMyLZs91Zlw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.12.1.tgz", + "integrity": "sha512-nZY0ESiaQDI1y96+jk6VxMOaL4LPo/QDHBqL+SF3/vl6dHkTwHlOI8L4ZwuRBHgakRBw5zsVylel7QPbbGuYgg==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0" } }, "@babel/plugin-proposal-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.10.4.tgz", - "integrity": "sha512-73/G7QoRoeNkLZFxsoCCvlg4ezE4eM+57PnOqgaPOozd5myfj7p0muD1mRVJvbUWbOzD+q3No2bWbaKy+DJ8DA==", + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.7.tgz", + "integrity": 
"sha512-8c+uy0qmnRTeukiGsjLGy6uVs/TFjJchGXUeBqlG4VWYOdJWkhhVPdQ3uHwbmalfJwv2JsV0qffXP4asRfL2SQ==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-numeric-separator": "^7.10.4" } }, "@babel/plugin-proposal-object-rest-spread": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.11.0.tgz", - "integrity": "sha512-wzch41N4yztwoRw0ak+37wxwJM2oiIiy6huGCoqkvSTA9acYWcPfn9Y4aJqmFFJ70KTJUu29f3DQ43uJ9HXzEA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz", + "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-object-rest-spread": "^7.8.0", - "@babel/plugin-transform-parameters": "^7.10.4" + "@babel/plugin-transform-parameters": "^7.12.1" } }, "@babel/plugin-proposal-optional-catch-binding": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.10.4.tgz", - "integrity": "sha512-LflT6nPh+GK2MnFiKDyLiqSqVHkQnVf7hdoAvyTnnKj9xB3docGRsdPuxp6qqqW19ifK3xgc9U5/FwrSaCNX5g==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.12.1.tgz", + "integrity": "sha512-hFvIjgprh9mMw5v42sJWLI1lzU5L2sznP805zeT6rySVRA0Y18StRhDqhSxlap0oVgItRsB6WSROp4YnJTJz0g==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-optional-catch-binding": "^7.8.0" } }, "@babel/plugin-proposal-optional-chaining": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.11.0.tgz", - "integrity": 
"sha512-v9fZIu3Y8562RRwhm1BbMRxtqZNFmFA2EG+pT2diuU8PT3H6T/KXoZ54KgYisfOFZHV6PfvAiBIZ9Rcz+/JCxA==", + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.12.7.tgz", + "integrity": "sha512-4ovylXZ0PWmwoOvhU2vhnzVNnm88/Sm9nx7V8BPgMvAzn5zDou3/Awy0EjglyubVHasJj+XCEkr/r1X3P5elCA==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-skip-transparent-expression-wrappers": "^7.11.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", "@babel/plugin-syntax-optional-chaining": "^7.8.0" } }, "@babel/plugin-proposal-private-methods": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.10.4.tgz", - "integrity": "sha512-wh5GJleuI8k3emgTg5KkJK6kHNsGEr0uBTDBuQUBJwckk9xs1ez79ioheEVVxMLyPscB0LfkbVHslQqIzWV6Bw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.12.1.tgz", + "integrity": "sha512-mwZ1phvH7/NHK6Kf8LP7MYDogGV+DKB1mryFOEwx5EBNQrosvIczzZFTUmWaeujd5xT6G1ELYWUz3CutMhjE1w==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.10.4", + "@babel/helper-create-class-features-plugin": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.10.4.tgz", - "integrity": "sha512-H+3fOgPnEXFL9zGYtKQe4IDOPKYlZdF1kqFDQRRb8PK4B8af1vAGK04tF5iQAAsui+mHNBQSAtd2/ndEDe9wuA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.1.tgz", + "integrity": "sha512-MYq+l+PvHuw/rKUz1at/vb6nCnQ2gmJBNaM62z0OgH7B2W1D9pvkpYtlti9bGtizNIU1K3zm4bZF9F91efVY0w==", "requires": { - 
"@babel/helper-create-regexp-features-plugin": "^7.10.4", + "@babel/helper-create-regexp-features-plugin": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4" } }, @@ -598,17 +579,17 @@ } }, "@babel/plugin-syntax-class-properties": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.10.4.tgz", - "integrity": "sha512-GCSBF7iUle6rNugfURwNmCGG3Z/2+opxAMLs1nND4bhEG5PuxTIggDBoeYYSujAlLtsupzOHYJQgPS3pivwXIA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.1.tgz", + "integrity": "sha512-U40A76x5gTwmESz+qiqssqmeEsKvcSyvtgktrm0uzcARAmM9I1jR221f6Oq+GmHrcD+LvZDag1UTOTe2fL3TeA==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-syntax-decorators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.10.4.tgz", - "integrity": "sha512-2NaoC6fAk2VMdhY1eerkfHV+lVYC1u8b+jmRJISqANCJlTxYy19HGdIkkQtix2UtkcPuPu+IlDgrVseZnU03bw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.1.tgz", + "integrity": "sha512-ir9YW5daRrTYiy9UJ2TzdNIJEZu8KclVzDcfSt4iEmOtwQ4llPtWInNKJyKnVXp1vE4bbVd5S31M/im3mYMO1w==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } @@ -638,9 +619,9 @@ } }, "@babel/plugin-syntax-jsx": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.10.4.tgz", - "integrity": "sha512-KCg9mio9jwiARCB7WAcQ7Y1q+qicILjoK8LP/VkPkEKaf5dkaZZK1EcTe91a3JJlZ3qy6L5s9X52boEYi8DM9g==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", + "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } 
@@ -694,244 +675,243 @@ } }, "@babel/plugin-syntax-top-level-await": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.10.4.tgz", - "integrity": "sha512-ni1brg4lXEmWyafKr0ccFWkJG0CeMt4WV1oyeBW6EFObF4oOHclbkj5cARxAPQyAQ2UTuplJyK4nfkXIMMFvsQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.1.tgz", + "integrity": "sha512-i7ooMZFS+a/Om0crxZodrTzNEPJHZrlMVGMTEpFAj6rYY/bKCddB0Dk/YxfPuYXOopuhKk/e1jV6h+WUU9XN3A==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-arrow-functions": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.10.4.tgz", - "integrity": "sha512-9J/oD1jV0ZCBcgnoFWFq1vJd4msoKb/TCpGNFyyLt0zABdcvgK3aYikZ8HjzB14c26bc7E3Q1yugpwGy2aTPNA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.12.1.tgz", + "integrity": "sha512-5QB50qyN44fzzz4/qxDPQMBCTHgxg3n0xRBLJUmBlLoU/sFvxVWGZF/ZUfMVDQuJUKXaBhbupxIzIfZ6Fwk/0A==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-async-to-generator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.10.4.tgz", - "integrity": "sha512-F6nREOan7J5UXTLsDsZG3DXmZSVofr2tGNwfdrVwkDWHfQckbQXnXSPfD7iO+c/2HGqycwyLST3DnZ16n+cBJQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.12.1.tgz", + "integrity": "sha512-SDtqoEcarK1DFlRJ1hHRY5HvJUj5kX4qmtpMAm2QnhOlyuMC4TMdCRgW6WXpv93rZeYNeLP22y8Aq2dbcDRM1A==", "requires": { - "@babel/helper-module-imports": "^7.10.4", + "@babel/helper-module-imports": "^7.12.1", 
"@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-remap-async-to-generator": "^7.10.4" + "@babel/helper-remap-async-to-generator": "^7.12.1" } }, "@babel/plugin-transform-block-scoped-functions": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.10.4.tgz", - "integrity": "sha512-WzXDarQXYYfjaV1szJvN3AD7rZgZzC1JtjJZ8dMHUyiK8mxPRahynp14zzNjU3VkPqPsO38CzxiWO1c9ARZ8JA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.1.tgz", + "integrity": "sha512-5OpxfuYnSgPalRpo8EWGPzIYf0lHBWORCkj5M0oLBwHdlux9Ri36QqGW3/LR13RSVOAoUUMzoPI/jpE4ABcHoA==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-block-scoping": { - "version": "7.11.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.11.1.tgz", - "integrity": "sha512-00dYeDE0EVEHuuM+26+0w/SCL0BH2Qy7LwHuI4Hi4MH5gkC8/AqMN5uWFJIsoXZrAphiMm1iXzBw6L2T+eA0ew==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.1.tgz", + "integrity": "sha512-zJyAC9sZdE60r1nVQHblcfCj29Dh2Y0DOvlMkcqSo0ckqjiCwNiUezUKw+RjOCwGfpLRwnAeQ2XlLpsnGkvv9w==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-classes": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.10.4.tgz", - "integrity": "sha512-2oZ9qLjt161dn1ZE0Ms66xBncQH4In8Sqw1YWgBUZuGVJJS5c0OFZXL6dP2MRHrkU/eKhWg8CzFJhRQl50rQxA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.12.1.tgz", + "integrity": "sha512-/74xkA7bVdzQTBeSUhLLJgYIcxw/dpEpCdRDiHgPJ3Mv6uC11UhjpOhl72CgqbBCmt1qtssCyB2xnJm1+PFjog==", 
"requires": { "@babel/helper-annotate-as-pure": "^7.10.4", "@babel/helper-define-map": "^7.10.4", "@babel/helper-function-name": "^7.10.4", "@babel/helper-optimise-call-expression": "^7.10.4", "@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-replace-supers": "^7.10.4", + "@babel/helper-replace-supers": "^7.12.1", "@babel/helper-split-export-declaration": "^7.10.4", "globals": "^11.1.0" } }, "@babel/plugin-transform-computed-properties": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.10.4.tgz", - "integrity": "sha512-JFwVDXcP/hM/TbyzGq3l/XWGut7p46Z3QvqFMXTfk6/09m7xZHJUN9xHfsv7vqqD4YnfI5ueYdSJtXqqBLyjBw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.12.1.tgz", + "integrity": "sha512-vVUOYpPWB7BkgUWPo4C44mUQHpTZXakEqFjbv8rQMg7TC6S6ZhGZ3otQcRH6u7+adSlE5i0sp63eMC/XGffrzg==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-destructuring": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.10.4.tgz", - "integrity": "sha512-+WmfvyfsyF603iPa6825mq6Qrb7uLjTOsa3XOFzlYcYDHSS4QmpOWOL0NNBY5qMbvrcf3tq0Cw+v4lxswOBpgA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.12.1.tgz", + "integrity": "sha512-fRMYFKuzi/rSiYb2uRLiUENJOKq4Gnl+6qOv5f8z0TZXg3llUwUhsNNwrwaT/6dUhJTzNpBr+CUvEWBtfNY1cw==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-dotall-regex": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.10.4.tgz", - "integrity": "sha512-ZEAVvUTCMlMFAbASYSVQoxIbHm2OkG2MseW6bV2JjIygOjdVv8tuxrCTzj1+Rynh7ODb8GivUy7dzEXzEhuPaA==", + 
"version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.1.tgz", + "integrity": "sha512-B2pXeRKoLszfEW7J4Hg9LoFaWEbr/kzo3teWHmtFCszjRNa/b40f9mfeqZsIDLLt/FjwQ6pz/Gdlwy85xNckBA==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.10.4", + "@babel/helper-create-regexp-features-plugin": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-duplicate-keys": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.10.4.tgz", - "integrity": "sha512-GL0/fJnmgMclHiBTTWXNlYjYsA7rDrtsazHG6mglaGSTh0KsrW04qml+Bbz9FL0LcJIRwBWL5ZqlNHKTkU3xAA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.1.tgz", + "integrity": "sha512-iRght0T0HztAb/CazveUpUQrZY+aGKKaWXMJ4uf9YJtqxSUe09j3wteztCUDRHs+SRAL7yMuFqUsLoAKKzgXjw==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-exponentiation-operator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.10.4.tgz", - "integrity": "sha512-S5HgLVgkBcRdyQAHbKj+7KyuWx8C6t5oETmUuwz1pt3WTWJhsUV0WIIXuVvfXMxl/QQyHKlSCNNtaIamG8fysw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.1.tgz", + "integrity": "sha512-7tqwy2bv48q+c1EHbXK0Zx3KXd2RVQp6OC7PbwFNt/dPTAV3Lu5sWtWuAj8owr5wqtWnqHfl2/mJlUmqkChKug==", "requires": { "@babel/helper-builder-binary-assignment-operator-visitor": "^7.10.4", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-for-of": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.10.4.tgz", - 
"integrity": "sha512-ItdQfAzu9AlEqmusA/65TqJ79eRcgGmpPPFvBnGILXZH975G0LNjP1yjHvGgfuCxqrPPueXOPe+FsvxmxKiHHQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.12.1.tgz", + "integrity": "sha512-Zaeq10naAsuHo7heQvyV0ptj4dlZJwZgNAtBYBnu5nNKJoW62m0zKcIEyVECrUKErkUkg6ajMy4ZfnVZciSBhg==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-function-name": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.10.4.tgz", - "integrity": "sha512-OcDCq2y5+E0dVD5MagT5X+yTRbcvFjDI2ZVAottGH6tzqjx/LKpgkUepu3hp/u4tZBzxxpNGwLsAvGBvQ2mJzg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.1.tgz", + "integrity": "sha512-JF3UgJUILoFrFMEnOJLJkRHSk6LUSXLmEFsA23aR2O5CSLUxbeUX1IZ1YQ7Sn0aXb601Ncwjx73a+FVqgcljVw==", "requires": { "@babel/helper-function-name": "^7.10.4", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-literals": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.10.4.tgz", - "integrity": "sha512-Xd/dFSTEVuUWnyZiMu76/InZxLTYilOSr1UlHV+p115Z/Le2Fi1KXkJUYz0b42DfndostYlPub3m8ZTQlMaiqQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.1.tgz", + "integrity": "sha512-+PxVGA+2Ag6uGgL0A5f+9rklOnnMccwEBzwYFL3EUaKuiyVnUipyXncFcfjSkbimLrODoqki1U9XxZzTvfN7IQ==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-member-expression-literals": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.10.4.tgz", - "integrity": 
"sha512-0bFOvPyAoTBhtcJLr9VcwZqKmSjFml1iVxvPL0ReomGU53CX53HsM4h2SzckNdkQcHox1bpAqzxBI1Y09LlBSw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.1.tgz", + "integrity": "sha512-1sxePl6z9ad0gFMB9KqmYofk34flq62aqMt9NqliS/7hPEpURUCMbyHXrMPlo282iY7nAvUB1aQd5mg79UD9Jg==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-modules-amd": { - "version": "7.10.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.10.5.tgz", - "integrity": "sha512-elm5uruNio7CTLFItVC/rIzKLfQ17+fX7EVz5W0TMgIHFo1zY0Ozzx+lgwhL4plzl8OzVn6Qasx5DeEFyoNiRw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.12.1.tgz", + "integrity": "sha512-tDW8hMkzad5oDtzsB70HIQQRBiTKrhfgwC/KkJeGsaNFTdWhKNt/BiE8c5yj19XiGyrxpbkOfH87qkNg1YGlOQ==", "requires": { - "@babel/helper-module-transforms": "^7.10.5", + "@babel/helper-module-transforms": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-commonjs": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.10.4.tgz", - "integrity": "sha512-Xj7Uq5o80HDLlW64rVfDBhao6OX89HKUmb+9vWYaLXBZOma4gA6tw4Ni1O5qVDoZWUV0fxMYA0aYzOawz0l+1w==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.12.1.tgz", + "integrity": "sha512-dY789wq6l0uLY8py9c1B48V8mVL5gZh/+PQ5ZPrylPYsnAvnEMjqsUXkuoDVPeVK+0VyGar+D08107LzDQ6pag==", "requires": { - "@babel/helper-module-transforms": "^7.10.4", + "@babel/helper-module-transforms": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-simple-access": "^7.10.4", + 
"@babel/helper-simple-access": "^7.12.1", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-systemjs": { - "version": "7.10.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.10.5.tgz", - "integrity": "sha512-f4RLO/OL14/FP1AEbcsWMzpbUz6tssRaeQg11RH1BP/XnPpRoVwgeYViMFacnkaw4k4wjRSjn3ip1Uw9TaXuMw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.12.1.tgz", + "integrity": "sha512-Hn7cVvOavVh8yvW6fLwveFqSnd7rbQN3zJvoPNyNaQSvgfKmDBO9U1YL9+PCXGRlZD9tNdWTy5ACKqMuzyn32Q==", "requires": { "@babel/helper-hoist-variables": "^7.10.4", - "@babel/helper-module-transforms": "^7.10.5", + "@babel/helper-module-transforms": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-validator-identifier": "^7.10.4", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-umd": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.10.4.tgz", - "integrity": "sha512-mohW5q3uAEt8T45YT7Qc5ws6mWgJAaL/8BfWD9Dodo1A3RKWli8wTS+WiQ/knF+tXlPirW/1/MqzzGfCExKECA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.12.1.tgz", + "integrity": "sha512-aEIubCS0KHKM0zUos5fIoQm+AZUMt1ZvMpqz0/H5qAQ7vWylr9+PLYurT+Ic7ID/bKLd4q8hDovaG3Zch2uz5Q==", "requires": { - "@babel/helper-module-transforms": "^7.10.4", + "@babel/helper-module-transforms": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.10.4.tgz", - "integrity": 
"sha512-V6LuOnD31kTkxQPhKiVYzYC/Jgdq53irJC/xBSmqcNcqFGV+PER4l6rU5SH2Vl7bH9mLDHcc0+l9HUOe4RNGKA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.1.tgz", + "integrity": "sha512-tB43uQ62RHcoDp9v2Nsf+dSM8sbNodbEicbQNA53zHz8pWUhsgHSJCGpt7daXxRydjb0KnfmB+ChXOv3oADp1Q==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.10.4" + "@babel/helper-create-regexp-features-plugin": "^7.12.1" } }, "@babel/plugin-transform-new-target": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.10.4.tgz", - "integrity": "sha512-YXwWUDAH/J6dlfwqlWsztI2Puz1NtUAubXhOPLQ5gjR/qmQ5U96DY4FQO8At33JN4XPBhrjB8I4eMmLROjjLjw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.1.tgz", + "integrity": "sha512-+eW/VLcUL5L9IvJH7rT1sT0CzkdUTvPrXC2PXTn/7z7tXLBuKvezYbGdxD5WMRoyvyaujOq2fWoKl869heKjhw==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-object-super": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.10.4.tgz", - "integrity": "sha512-5iTw0JkdRdJvr7sY0vHqTpnruUpTea32JHmq/atIWqsnNussbRzjEDyWep8UNztt1B5IusBYg8Irb0bLbiEBCQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.1.tgz", + "integrity": "sha512-AvypiGJH9hsquNUn+RXVcBdeE3KHPZexWRdimhuV59cSoOt5kFBmqlByorAeUlGG2CJWd0U+4ZtNKga/TB0cAw==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-replace-supers": "^7.10.4" + "@babel/helper-replace-supers": "^7.12.1" } }, "@babel/plugin-transform-parameters": { - "version": "7.10.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.10.5.tgz", - "integrity": "sha512-xPHwUj5RdFV8l1wuYiu5S9fqWGM2DrYc24TMvUiRrPVm+SM3XeqU9BcokQX/kEUe+p2RBwy+yoiR1w/Blq6ubw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.12.1.tgz", + "integrity": "sha512-xq9C5EQhdPK23ZeCdMxl8bbRnAgHFrw5EOC3KJUsSylZqdkCaFEXxGSBuTSObOpiiHHNyb82es8M1QYgfQGfNg==", "requires": { - "@babel/helper-get-function-arity": "^7.10.4", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-property-literals": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.10.4.tgz", - "integrity": "sha512-ofsAcKiUxQ8TY4sScgsGeR2vJIsfrzqvFb9GvJ5UdXDzl+MyYCaBj/FGzXuv7qE0aJcjWMILny1epqelnFlz8g==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.1.tgz", + "integrity": "sha512-6MTCR/mZ1MQS+AwZLplX4cEySjCpnIF26ToWo942nqn8hXSm7McaHQNeGx/pt7suI1TWOWMfa/NgBhiqSnX0cQ==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-regenerator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.10.4.tgz", - "integrity": "sha512-3thAHwtor39A7C04XucbMg17RcZ3Qppfxr22wYzZNcVIkPHfpM9J0SO8zuCV6SZa265kxBJSrfKTvDCYqBFXGw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.12.1.tgz", + "integrity": "sha512-gYrHqs5itw6i4PflFX3OdBPMQdPbF4bj2REIUxlMRUFk0/ZOAIpDFuViuxPjUL7YC8UPnf+XG7/utJvqXdPKng==", "requires": { "regenerator-transform": "^0.14.2" } }, "@babel/plugin-transform-reserved-words": { - "version": "7.10.4", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.10.4.tgz", - "integrity": "sha512-hGsw1O6Rew1fkFbDImZIEqA8GoidwTAilwCyWqLBM9f+e/u/sQMQu7uX6dyokfOayRuuVfKOW4O7HvaBWM+JlQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.1.tgz", + "integrity": "sha512-pOnUfhyPKvZpVyBHhSBoX8vfA09b7r00Pmm1sH+29ae2hMTKVmSp4Ztsr8KBKjLjx17H0eJqaRC3bR2iThM54A==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-runtime": { - "version": "7.11.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.11.5.tgz", - "integrity": "sha512-9aIoee+EhjySZ6vY5hnLjigHzunBlscx9ANKutkeWTJTx6m5Rbq6Ic01tLvO54lSusR+BxV7u4UDdCmXv5aagg==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.12.10.tgz", + "integrity": "sha512-xOrUfzPxw7+WDm9igMgQCbO3cJKymX7dFdsgRr1eu9n3KjjyU4pptIXbXPseQDquw+W+RuJEJMHKHNsPNNm3CA==", "requires": { - "@babel/helper-module-imports": "^7.10.4", + "@babel/helper-module-imports": "^7.12.5", "@babel/helper-plugin-utils": "^7.10.4", - "resolve": "^1.8.1", "semver": "^5.5.1" }, "dependencies": { @@ -943,89 +923,88 @@ } }, "@babel/plugin-transform-shorthand-properties": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.10.4.tgz", - "integrity": "sha512-AC2K/t7o07KeTIxMoHneyX90v3zkm5cjHJEokrPEAGEy3UCp8sLKfnfOIGdZ194fyN4wfX/zZUWT9trJZ0qc+Q==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.1.tgz", + "integrity": "sha512-GFZS3c/MhX1OusqB1MZ1ct2xRzX5ppQh2JU1h2Pnfk88HtFTM+TWQqJNfwkmxtPQtb/s1tk87oENfXJlx7rSDw==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, 
"@babel/plugin-transform-spread": { - "version": "7.11.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.11.0.tgz", - "integrity": "sha512-UwQYGOqIdQJe4aWNyS7noqAnN2VbaczPLiEtln+zPowRNlD+79w3oi2TWfYe0eZgd+gjZCbsydN7lzWysDt+gw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.12.1.tgz", + "integrity": "sha512-vuLp8CP0BE18zVYjsEBZ5xoCecMK6LBMMxYzJnh01rxQRvhNhH1csMMmBfNo5tGpGO+NhdSNW2mzIvBu3K1fng==", "requires": { "@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-skip-transparent-expression-wrappers": "^7.11.0" + "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1" } }, "@babel/plugin-transform-sticky-regex": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.10.4.tgz", - "integrity": "sha512-Ddy3QZfIbEV0VYcVtFDCjeE4xwVTJWTmUtorAJkn6u/92Z/nWJNV+mILyqHKrUxXYKA2EoCilgoPePymKL4DvQ==", + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.7.tgz", + "integrity": "sha512-VEiqZL5N/QvDbdjfYQBhruN0HYjSPjC4XkeqW4ny/jNtH9gcbgaqBIXYEZCNnESMAGs0/K/R7oFGMhOyu/eIxg==", "requires": { - "@babel/helper-plugin-utils": "^7.10.4", - "@babel/helper-regex": "^7.10.4" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-template-literals": { - "version": "7.10.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.10.5.tgz", - "integrity": "sha512-V/lnPGIb+KT12OQikDvgSuesRX14ck5FfJXt6+tXhdkJ+Vsd0lDCVtF6jcB4rNClYFzaB2jusZ+lNISDk2mMMw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.12.1.tgz", + "integrity": 
"sha512-b4Zx3KHi+taXB1dVRBhVJtEPi9h1THCeKmae2qP0YdUHIFhVjtpqqNfxeVAa1xeHVhAy4SbHxEwx5cltAu5apw==", "requires": { - "@babel/helper-annotate-as-pure": "^7.10.4", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-typeof-symbol": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.10.4.tgz", - "integrity": "sha512-QqNgYwuuW0y0H+kUE/GWSR45t/ccRhe14Fs/4ZRouNNQsyd4o3PG4OtHiIrepbM2WKUBDAXKCAK/Lk4VhzTaGA==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.10.tgz", + "integrity": "sha512-JQ6H8Rnsogh//ijxspCjc21YPd3VLVoYtAwv3zQmqAt8YGYUtdo5usNhdl4b9/Vir2kPFZl6n1h0PfUz4hJhaA==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-unicode-escapes": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.10.4.tgz", - "integrity": "sha512-y5XJ9waMti2J+e7ij20e+aH+fho7Wb7W8rNuu72aKRwCHFqQdhkdU2lo3uZ9tQuboEJcUFayXdARhcxLQ3+6Fg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.1.tgz", + "integrity": "sha512-I8gNHJLIc7GdApm7wkVnStWssPNbSRMPtgHdmH3sRM1zopz09UWPS4x5V4n1yz/MIWTVnJ9sp6IkuXdWM4w+2Q==", "requires": { "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-unicode-regex": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.10.4.tgz", - "integrity": "sha512-wNfsc4s8N2qnIwpO/WP2ZiSyjfpTamT2C9V9FDH/Ljub9zw6P3SjkXcFmc0RQUt96k2fmIvtla2MMjgTwIAC+A==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.1.tgz", + "integrity": 
"sha512-SqH4ClNngh/zGwHZOOQMTD+e8FGWexILV+ePMyiDJttAWRh5dhDL8rcl5lSgU3Huiq6Zn6pWTMvdPAb21Dwdyg==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.10.4", + "@babel/helper-create-regexp-features-plugin": "^7.12.1", "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/preset-env": { - "version": "7.11.5", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.11.5.tgz", - "integrity": "sha512-kXqmW1jVcnB2cdueV+fyBM8estd5mlNfaQi6lwLgRwCby4edpavgbFhiBNjmWA3JpB/yZGSISa7Srf+TwxDQoA==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.12.10.tgz", + "integrity": "sha512-Gz9hnBT/tGeTE2DBNDkD7BiWRELZt+8lSysHuDwmYXUIvtwZl0zI+D6mZgXZX0u8YBlLS4tmai9ONNY9tjRgRA==", "requires": { - "@babel/compat-data": "^7.11.0", - "@babel/helper-compilation-targets": "^7.10.4", - "@babel/helper-module-imports": "^7.10.4", + "@babel/compat-data": "^7.12.7", + "@babel/helper-compilation-targets": "^7.12.5", + "@babel/helper-module-imports": "^7.12.5", "@babel/helper-plugin-utils": "^7.10.4", - "@babel/plugin-proposal-async-generator-functions": "^7.10.4", - "@babel/plugin-proposal-class-properties": "^7.10.4", - "@babel/plugin-proposal-dynamic-import": "^7.10.4", - "@babel/plugin-proposal-export-namespace-from": "^7.10.4", - "@babel/plugin-proposal-json-strings": "^7.10.4", - "@babel/plugin-proposal-logical-assignment-operators": "^7.11.0", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.10.4", - "@babel/plugin-proposal-numeric-separator": "^7.10.4", - "@babel/plugin-proposal-object-rest-spread": "^7.11.0", - "@babel/plugin-proposal-optional-catch-binding": "^7.10.4", - "@babel/plugin-proposal-optional-chaining": "^7.11.0", - "@babel/plugin-proposal-private-methods": "^7.10.4", - "@babel/plugin-proposal-unicode-property-regex": "^7.10.4", + "@babel/helper-validator-option": "^7.12.1", + "@babel/plugin-proposal-async-generator-functions": "^7.12.1", + "@babel/plugin-proposal-class-properties": 
"^7.12.1", + "@babel/plugin-proposal-dynamic-import": "^7.12.1", + "@babel/plugin-proposal-export-namespace-from": "^7.12.1", + "@babel/plugin-proposal-json-strings": "^7.12.1", + "@babel/plugin-proposal-logical-assignment-operators": "^7.12.1", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.12.1", + "@babel/plugin-proposal-numeric-separator": "^7.12.7", + "@babel/plugin-proposal-object-rest-spread": "^7.12.1", + "@babel/plugin-proposal-optional-catch-binding": "^7.12.1", + "@babel/plugin-proposal-optional-chaining": "^7.12.7", + "@babel/plugin-proposal-private-methods": "^7.12.1", + "@babel/plugin-proposal-unicode-property-regex": "^7.12.1", "@babel/plugin-syntax-async-generators": "^7.8.0", - "@babel/plugin-syntax-class-properties": "^7.10.4", + "@babel/plugin-syntax-class-properties": "^7.12.1", "@babel/plugin-syntax-dynamic-import": "^7.8.0", "@babel/plugin-syntax-export-namespace-from": "^7.8.3", "@babel/plugin-syntax-json-strings": "^7.8.0", @@ -1035,45 +1014,42 @@ "@babel/plugin-syntax-object-rest-spread": "^7.8.0", "@babel/plugin-syntax-optional-catch-binding": "^7.8.0", "@babel/plugin-syntax-optional-chaining": "^7.8.0", - "@babel/plugin-syntax-top-level-await": "^7.10.4", - "@babel/plugin-transform-arrow-functions": "^7.10.4", - "@babel/plugin-transform-async-to-generator": "^7.10.4", - "@babel/plugin-transform-block-scoped-functions": "^7.10.4", - "@babel/plugin-transform-block-scoping": "^7.10.4", - "@babel/plugin-transform-classes": "^7.10.4", - "@babel/plugin-transform-computed-properties": "^7.10.4", - "@babel/plugin-transform-destructuring": "^7.10.4", - "@babel/plugin-transform-dotall-regex": "^7.10.4", - "@babel/plugin-transform-duplicate-keys": "^7.10.4", - "@babel/plugin-transform-exponentiation-operator": "^7.10.4", - "@babel/plugin-transform-for-of": "^7.10.4", - "@babel/plugin-transform-function-name": "^7.10.4", - "@babel/plugin-transform-literals": "^7.10.4", - "@babel/plugin-transform-member-expression-literals": "^7.10.4", - 
"@babel/plugin-transform-modules-amd": "^7.10.4", - "@babel/plugin-transform-modules-commonjs": "^7.10.4", - "@babel/plugin-transform-modules-systemjs": "^7.10.4", - "@babel/plugin-transform-modules-umd": "^7.10.4", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.10.4", - "@babel/plugin-transform-new-target": "^7.10.4", - "@babel/plugin-transform-object-super": "^7.10.4", - "@babel/plugin-transform-parameters": "^7.10.4", - "@babel/plugin-transform-property-literals": "^7.10.4", - "@babel/plugin-transform-regenerator": "^7.10.4", - "@babel/plugin-transform-reserved-words": "^7.10.4", - "@babel/plugin-transform-shorthand-properties": "^7.10.4", - "@babel/plugin-transform-spread": "^7.11.0", - "@babel/plugin-transform-sticky-regex": "^7.10.4", - "@babel/plugin-transform-template-literals": "^7.10.4", - "@babel/plugin-transform-typeof-symbol": "^7.10.4", - "@babel/plugin-transform-unicode-escapes": "^7.10.4", - "@babel/plugin-transform-unicode-regex": "^7.10.4", + "@babel/plugin-syntax-top-level-await": "^7.12.1", + "@babel/plugin-transform-arrow-functions": "^7.12.1", + "@babel/plugin-transform-async-to-generator": "^7.12.1", + "@babel/plugin-transform-block-scoped-functions": "^7.12.1", + "@babel/plugin-transform-block-scoping": "^7.12.1", + "@babel/plugin-transform-classes": "^7.12.1", + "@babel/plugin-transform-computed-properties": "^7.12.1", + "@babel/plugin-transform-destructuring": "^7.12.1", + "@babel/plugin-transform-dotall-regex": "^7.12.1", + "@babel/plugin-transform-duplicate-keys": "^7.12.1", + "@babel/plugin-transform-exponentiation-operator": "^7.12.1", + "@babel/plugin-transform-for-of": "^7.12.1", + "@babel/plugin-transform-function-name": "^7.12.1", + "@babel/plugin-transform-literals": "^7.12.1", + "@babel/plugin-transform-member-expression-literals": "^7.12.1", + "@babel/plugin-transform-modules-amd": "^7.12.1", + "@babel/plugin-transform-modules-commonjs": "^7.12.1", + "@babel/plugin-transform-modules-systemjs": "^7.12.1", + 
"@babel/plugin-transform-modules-umd": "^7.12.1", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.1", + "@babel/plugin-transform-new-target": "^7.12.1", + "@babel/plugin-transform-object-super": "^7.12.1", + "@babel/plugin-transform-parameters": "^7.12.1", + "@babel/plugin-transform-property-literals": "^7.12.1", + "@babel/plugin-transform-regenerator": "^7.12.1", + "@babel/plugin-transform-reserved-words": "^7.12.1", + "@babel/plugin-transform-shorthand-properties": "^7.12.1", + "@babel/plugin-transform-spread": "^7.12.1", + "@babel/plugin-transform-sticky-regex": "^7.12.7", + "@babel/plugin-transform-template-literals": "^7.12.1", + "@babel/plugin-transform-typeof-symbol": "^7.12.10", + "@babel/plugin-transform-unicode-escapes": "^7.12.1", + "@babel/plugin-transform-unicode-regex": "^7.12.1", "@babel/preset-modules": "^0.1.3", - "@babel/types": "^7.11.5", - "browserslist": "^4.12.0", - "core-js-compat": "^3.6.2", - "invariant": "^2.2.2", - "levenary": "^1.1.1", + "@babel/types": "^7.12.10", + "core-js-compat": "^3.8.0", "semver": "^5.5.0" }, "dependencies": { @@ -1097,9 +1073,9 @@ } }, "@babel/runtime": { - "version": "7.11.2", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.11.2.tgz", - "integrity": "sha512-TeWkU52so0mPtDcaCTxNBI/IHiz0pZgr8VEFqXFtZWpYD08ZB6FaSwVAS8MKRQAP3bYKiVjwysOJgMFY28o6Tw==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.12.5.tgz", + "integrity": "sha512-plcc+hbExy3McchJCEQG3knOsuh3HH+Prx1P6cLIkET/0dLuQDEnrT+s27Axgc9bqfsmNUNHfscgMUdBpC9xfg==", "requires": { "regenerator-runtime": "^0.13.4" }, @@ -1112,35 +1088,35 @@ } }, "@babel/template": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.10.4.tgz", - "integrity": "sha512-ZCjD27cGJFUB6nmCB1Enki3r+L5kJveX9pq1SvAUKoICy6CZ9yD8xO086YXdYhvNjBdnekm4ZnaP5yC8Cs/1tA==", + "version": "7.12.7", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.7.tgz", 
+ "integrity": "sha512-GkDzmHS6GV7ZeXfJZ0tLRBhZcMcY0/Lnb+eEbXDBfCAcZCjrZKe6p3J4we/D24O9Y8enxWAg1cWwof59yLh2ow==", "requires": { "@babel/code-frame": "^7.10.4", - "@babel/parser": "^7.10.4", - "@babel/types": "^7.10.4" + "@babel/parser": "^7.12.7", + "@babel/types": "^7.12.7" } }, "@babel/traverse": { - "version": "7.11.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.11.5.tgz", - "integrity": "sha512-EjiPXt+r7LiCZXEfRpSJd+jUMnBd4/9OUv7Nx3+0u9+eimMwJmG0Q98lw4/289JCoxSE8OolDMNZaaF/JZ69WQ==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.12.10.tgz", + "integrity": "sha512-6aEtf0IeRgbYWzta29lePeYSk+YAFIC3kyqESeft8o5CkFlYIMX+EQDDWEiAQ9LHOA3d0oHdgrSsID/CKqXJlg==", "requires": { "@babel/code-frame": "^7.10.4", - "@babel/generator": "^7.11.5", + "@babel/generator": "^7.12.10", "@babel/helper-function-name": "^7.10.4", "@babel/helper-split-export-declaration": "^7.11.0", - "@babel/parser": "^7.11.5", - "@babel/types": "^7.11.5", + "@babel/parser": "^7.12.10", + "@babel/types": "^7.12.10", "debug": "^4.1.0", "globals": "^11.1.0", "lodash": "^4.17.19" }, "dependencies": { "debug": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", - "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { "ms": "2.1.2" } @@ -1153,9 +1129,9 @@ } }, "@babel/types": { - "version": "7.11.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.11.5.tgz", - "integrity": "sha512-bvM7Qz6eKnJVFIn+1LPtjlBFPVN5jNDc1XmN15vWe7Q3DPBufWWsLiIvUu7xW87uTG6QoggpIDnUgLQvPheU+Q==", + "version": "7.12.10", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.12.10.tgz", + "integrity": 
"sha512-sf6wboJV5mGyip2hIpDSKsr80RszPinEFjsHTalMxZAZkoQ2/2yQzxlcFN52SJqsyPfLtPmenL4g2KB3KJXPDw==", "requires": { "@babel/helper-validator-identifier": "^7.10.4", "lodash": "^4.17.19", @@ -1196,10 +1172,10 @@ "follow-redirects": "1.5.10" } }, - "fuse.js": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz", - "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==" + "entities": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", + "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" }, "markdown-it": { "version": "10.0.0", @@ -1255,11 +1231,6 @@ "@types/babel-types": "*" } }, - "@types/color-name": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" - }, "@types/glob": { "version": "7.1.3", "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz", @@ -1280,9 +1251,9 @@ "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==" }, "@types/node": { - "version": "14.11.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.11.2.tgz", - "integrity": "sha512-jiE3QIxJ8JLNcb1Ps6rDbysDhN4xa8DJJvuC9prr6w+1tIh+QAbYyNF3tyiZNLDBIuBCf4KEcV2UvQm/V60xfA==" + "version": "14.14.12", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.12.tgz", + "integrity": "sha512-ASH8OPHMNlkdjrEdmoILmzFfsJICvhBsFfAum4aKZ/9U4B6M6tTmTPh+f3ttWdD74CEGV5XvXWkbyfSdXaTd7g==" }, "@types/q": { "version": "1.5.4", @@ -1290,9 +1261,9 @@ "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" }, "@vue/babel-helper-vue-jsx-merge-props": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.0.0.tgz", - "integrity": "sha512-6tyf5Cqm4m6v7buITuwS+jHzPlIPxbFzEhXR5JGZpbrvOcp1hiQKckd305/3C7C36wFekNTQSxAtgeM0j0yoUw==" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.2.1.tgz", + "integrity": "sha512-QOi5OW45e2R20VygMSNhyQHvpdUwQZqGPc748JLGCYEy+yp8fNFNdbNIGAgZmi9e+2JHPd6i6idRuqivyicIkA==" }, "@vue/babel-helper-vue-transform-on": { "version": "1.0.0-rc.2", @@ -1300,12 +1271,13 @@ "integrity": "sha512-1+7CwjQ0Kasml6rHoNQUmbISwqLNNfFVBUcZl6QBremUl296ZmLrVQPqJP5pyAAWjZke5bpI1hlj+LVVuT7Jcg==" }, "@vue/babel-plugin-jsx": { - "version": "1.0.0-rc.3", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.0-rc.3.tgz", - "integrity": "sha512-/Ibq0hoKsidnHWPhgRpjcjYhYcHpqEm2fiKVAPO88OXZNHGwaGgS4yXkC6TDEvlZep4mBDo+2S5T81wpbVh90Q==", + "version": "1.0.0-rc.4", + "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.0-rc.4.tgz", + "integrity": "sha512-ifzYc0jfLqiQebfqzKrJGfmQFE1lIgFlE9Ive8hQMJS/GC9Y+mNtHpqmWyqljbFGsqmsxmMRNFdAUgz0HZN1rg==", "requires": { "@babel/helper-module-imports": "^7.0.0", "@babel/plugin-syntax-jsx": "^7.0.0", + "@babel/template": "^7.0.0", "@babel/traverse": "^7.0.0", "@babel/types": "^7.0.0", "@vue/babel-helper-vue-transform-on": "^1.0.0-rc.2", @@ -1315,20 +1287,20 @@ }, "dependencies": { "camelcase": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.0.0.tgz", - "integrity": "sha512-8KMDF1Vz2gzOq54ONPJS65IvTUaB1cHJ2DMM7MbPmLZljDH1qpzzLsWdiN9pHh6qvkRVDTi/07+eNGch/oLU4w==" + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==" } } }, "@vue/babel-plugin-transform-vue-jsx": { - "version": "1.1.2", - 
"resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.1.2.tgz", - "integrity": "sha512-YfdaoSMvD1nj7+DsrwfTvTnhDXI7bsuh+Y5qWwvQXlD24uLgnsoww3qbiZvWf/EoviZMrvqkqN4CBw0W3BWUTQ==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.2.1.tgz", + "integrity": "sha512-HJuqwACYehQwh1fNT8f4kyzqlNMpBuUK4rSiSES5D4QsYncv5fxFsLyrxFPG2ksO7t5WP+Vgix6tt6yKClwPzA==", "requires": { "@babel/helper-module-imports": "^7.0.0", "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.0.0", + "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", "html-tags": "^2.0.0", "lodash.kebabcase": "^4.1.1", "svg-tags": "^1.0.0" @@ -1342,9 +1314,9 @@ } }, "@vue/babel-preset-app": { - "version": "4.5.6", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.6.tgz", - "integrity": "sha512-Eps83UNiBJeqlbpR9afYnhvjVLElVtA4fDLNuVUr1r3RbepoxWuq+mUTr3TBArPQebnAaDcrZaNHBWTLRbfo3A==", + "version": "4.5.9", + "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.9.tgz", + "integrity": "sha512-d2H4hFnJsGnZtJAAZIbo1dmQJ2SI1MYix1Tc9/etlnJtCDPRHeCNodCSeuLgDwnoAyT3unzyHmTtaO56KRDuOQ==", "requires": { "@babel/core": "^7.11.0", "@babel/helper-compilation-targets": "^7.9.6", @@ -1365,49 +1337,67 @@ }, "dependencies": { "core-js": { - "version": "3.6.5", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.5.tgz", - "integrity": "sha512-vZVEEwZoIsI+vPEuoF9Iqf5H7/M3eeQqWlQnYa8FSKKePuYTf5MWnxb5SDAzCa60b3JBRS5g9b+Dq7b1y/RCrA==" + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.8.1.tgz", + "integrity": "sha512-9Id2xHY1W7m8hCl8NkhQn5CufmF/WuR30BTRewvCXc1aZd3kMECwNZ69ndLbekKfakw9Rf2Xyc+QR6E7Gg+obg==" } } }, "@vue/babel-preset-jsx": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.1.2.tgz", - 
"integrity": "sha512-zDpVnFpeC9YXmvGIDSsKNdL7qCG2rA3gjywLYHPCKDT10erjxF4U+6ay9X6TW5fl4GsDlJp9bVfAVQAAVzxxvQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.2.4.tgz", + "integrity": "sha512-oRVnmN2a77bYDJzeGSt92AuHXbkIxbf/XXSE3klINnh9AXBmVS1DGa1f0d+dDYpLfsAKElMnqKTQfKn7obcL4w==", "requires": { - "@vue/babel-helper-vue-jsx-merge-props": "^1.0.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.1.2", - "@vue/babel-sugar-functional-vue": "^1.1.2", - "@vue/babel-sugar-inject-h": "^1.1.2", - "@vue/babel-sugar-v-model": "^1.1.2", - "@vue/babel-sugar-v-on": "^1.1.2" + "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", + "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", + "@vue/babel-sugar-composition-api-inject-h": "^1.2.1", + "@vue/babel-sugar-composition-api-render-instance": "^1.2.4", + "@vue/babel-sugar-functional-vue": "^1.2.2", + "@vue/babel-sugar-inject-h": "^1.2.2", + "@vue/babel-sugar-v-model": "^1.2.3", + "@vue/babel-sugar-v-on": "^1.2.3" + } + }, + "@vue/babel-sugar-composition-api-inject-h": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-inject-h/-/babel-sugar-composition-api-inject-h-1.2.1.tgz", + "integrity": "sha512-4B3L5Z2G+7s+9Bwbf+zPIifkFNcKth7fQwekVbnOA3cr3Pq71q71goWr97sk4/yyzH8phfe5ODVzEjX7HU7ItQ==", + "requires": { + "@babel/plugin-syntax-jsx": "^7.2.0" + } + }, + "@vue/babel-sugar-composition-api-render-instance": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-render-instance/-/babel-sugar-composition-api-render-instance-1.2.4.tgz", + "integrity": "sha512-joha4PZznQMsxQYXtR3MnTgCASC9u3zt9KfBxIeuI5g2gscpTsSKRDzWQt4aqNIpx6cv8On7/m6zmmovlNsG7Q==", + "requires": { + "@babel/plugin-syntax-jsx": "^7.2.0" } }, "@vue/babel-sugar-functional-vue": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.1.2.tgz", - "integrity": 
"sha512-YhmdJQSVEFF5ETJXzrMpj0nkCXEa39TvVxJTuVjzvP2rgKhdMmQzlJuMv/HpadhZaRVMCCF3AEjjJcK5q/cYzQ==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.2.2.tgz", + "integrity": "sha512-JvbgGn1bjCLByIAU1VOoepHQ1vFsroSA/QkzdiSs657V79q6OwEWLCQtQnEXD/rLTA8rRit4rMOhFpbjRFm82w==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0" } }, "@vue/babel-sugar-inject-h": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.1.2.tgz", - "integrity": "sha512-VRSENdTvD5htpnVp7i7DNuChR5rVMcORdXjvv5HVvpdKHzDZAYiLSD+GhnhxLm3/dMuk8pSzV+k28ECkiN5m8w==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.2.2.tgz", + "integrity": "sha512-y8vTo00oRkzQTgufeotjCLPAvlhnpSkcHFEp60+LJUwygGcd5Chrpn5480AQp/thrxVm8m2ifAk0LyFel9oCnw==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0" } }, "@vue/babel-sugar-v-model": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.1.2.tgz", - "integrity": "sha512-vLXPvNq8vDtt0u9LqFdpGM9W9IWDmCmCyJXuozlq4F4UYVleXJ2Fa+3JsnTZNJcG+pLjjfnEGHci2339Kj5sGg==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.2.3.tgz", + "integrity": "sha512-A2jxx87mySr/ulAsSSyYE8un6SIH0NWHiLaCWpodPCVOlQVODCaSpiR4+IMsmBr73haG+oeCuSvMOM+ttWUqRQ==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.0.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.1.2", + "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", + "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", "camelcase": "^5.0.0", "html-tags": "^2.0.0", "svg-tags": "^1.0.0" @@ -1426,12 +1416,12 @@ } }, "@vue/babel-sugar-v-on": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.1.2.tgz", - "integrity": 
"sha512-T8ZCwC8Jp2uRtcZ88YwZtZXe7eQrJcfRq0uTFy6ShbwYJyz5qWskRFoVsdTi9o0WEhmQXxhQUewodOSCUPVmsQ==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.2.3.tgz", + "integrity": "sha512-kt12VJdz/37D3N3eglBywV8GStKNUhNrsxChXIV+o0MwVXORYuhDTHJRKPgLJRb/EY3vM2aRFQdxJBp9CLikjw==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.1.2", + "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", "camelcase": "^5.0.0" }, "dependencies": { @@ -1475,17 +1465,17 @@ } }, "@vuepress/core": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.5.4.tgz", - "integrity": "sha512-RaHJiX0Yno4S3zoV64JNd3xE55sza8rayyWvXAJY381XVMxKrsLBrgW6ntNYSkzGnZcxi6fwMV/CVOUhEtkEkA==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.7.1.tgz", + "integrity": "sha512-M5sxZq30Ke1vXa4ZZjk6185fwtpiJOqzXNnzcIe0GxtvtaF8Yij6b+KqQKlUJnnUXm+CKxiLCr8PTzDY26N7yw==", "requires": { "@babel/core": "^7.8.4", "@vue/babel-preset-app": "^4.1.2", - "@vuepress/markdown": "1.5.4", - "@vuepress/markdown-loader": "1.5.4", - "@vuepress/plugin-last-updated": "1.5.4", - "@vuepress/plugin-register-components": "1.5.4", - "@vuepress/shared-utils": "1.5.4", + "@vuepress/markdown": "1.7.1", + "@vuepress/markdown-loader": "1.7.1", + "@vuepress/plugin-last-updated": "1.7.1", + "@vuepress/plugin-register-components": "1.7.1", + "@vuepress/shared-utils": "1.7.1", "autoprefixer": "^9.5.1", "babel-loader": "^8.0.4", "cache-loader": "^3.0.0", @@ -1507,7 +1497,7 @@ "url-loader": "^1.0.1", "vue": "^2.6.10", "vue-loader": "^15.7.1", - "vue-router": "^3.1.3", + "vue-router": "^3.4.5", "vue-server-renderer": "^2.6.10", "vue-template-compiler": "^2.6.10", "vuepress-html-webpack-plugin": "^3.2.0", @@ -1520,18 +1510,18 @@ }, "dependencies": { "core-js": { - "version": "3.6.5", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.5.tgz", - "integrity": 
"sha512-vZVEEwZoIsI+vPEuoF9Iqf5H7/M3eeQqWlQnYa8FSKKePuYTf5MWnxb5SDAzCa60b3JBRS5g9b+Dq7b1y/RCrA==" + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.8.1.tgz", + "integrity": "sha512-9Id2xHY1W7m8hCl8NkhQn5CufmF/WuR30BTRewvCXc1aZd3kMECwNZ69ndLbekKfakw9Rf2Xyc+QR6E7Gg+obg==" } } }, "@vuepress/markdown": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.5.4.tgz", - "integrity": "sha512-bgrR9LTcAa2O0WipTbH3OFKeAfXc/2oU6cUIoMkyihSKUo1Mr5yt1XKM7vHe1uFEZygNr8EAemep8chsuVuISA==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.7.1.tgz", + "integrity": "sha512-Ava9vJECHG1+RC53ut1dXSze35IH5tc3qesC06Ny37WS93iDSQy09j8y+a0Lugy12j1369+QQeRFWa40tdHczA==", "requires": { - "@vuepress/shared-utils": "1.5.4", + "@vuepress/shared-utils": "1.7.1", "markdown-it": "^8.4.1", "markdown-it-anchor": "^5.0.2", "markdown-it-chain": "^1.3.0", @@ -1560,61 +1550,61 @@ } }, "@vuepress/markdown-loader": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.5.4.tgz", - "integrity": "sha512-3R5quGIXQm7gfPWN67SVZ9OBA7VrGEEXJjjV01MYkbfhqVGgO6lBRq73Og0XdKs4RPx4nqJUPthhL8FJVNRTIg==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.7.1.tgz", + "integrity": "sha512-GM1F/tRhP9qZydTC89FXJPlLH+BmZijMKom5BYLAMEXsU20A9kABTRoatPjOUbZuKT+gn03JgG97qVd8xa/ETw==", "requires": { - "@vuepress/markdown": "1.5.4", + "@vuepress/markdown": "1.7.1", "loader-utils": "^1.1.0", "lru-cache": "^5.1.1" } }, "@vuepress/plugin-active-header-links": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.5.4.tgz", - "integrity": "sha512-FI1Dr/44HVqxLMRSuaVEEwegGVEGFlaWYE3nsXwL7klKr6c+2kXHEw9rSQlAxzJyzVfovTk4dd+s/AMOKuLGZQ==", + "version": "1.7.1", + "resolved": 
"https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.7.1.tgz", + "integrity": "sha512-Wgf/oB9oPZLnYoLjQ/xbQc4Qa3RU5tXAo2dB4Xl/7bUL6SqBxO866kX3wPxKdSOIL58tq8iH9XbUe3Sxi8/ISQ==", "requires": { "lodash.debounce": "^4.0.8" } }, "@vuepress/plugin-google-analytics": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.5.4.tgz", - "integrity": "sha512-JVmIPBqKNiOVvow+XAZE+jgGvIRMBVxZOpKb5HBQ0xwz/E81Opl7cxdX9VkPKX+i9VoRBPWEiD/874RivzbW1g==" + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz", + "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ==" }, "@vuepress/plugin-last-updated": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.5.4.tgz", - "integrity": "sha512-9kezBCxPM+cevKRNML6Q7v6qkI8NQvKbVkwohlzsElM8FBmjlZmgFyZje66ksTnb/U6ogazCCq9jdOyipNcQ2A==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.7.1.tgz", + "integrity": "sha512-VW5jhBuO0WRHDsBmFsKC6QtEyBLCgyhuH9nQ65aairCn3tdoJPz0uQ4g3lr/boVbgsPexO677Sn3dRPgYqnMug==", "requires": { "cross-spawn": "^6.0.5" } }, "@vuepress/plugin-nprogress": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.5.4.tgz", - "integrity": "sha512-2bGKoO/o2e5mIfOU80q+AkxOK5wVijA/+8jGjSQVf2ccMpJw+Ly1mMi69r81Q0QkEihgfI9VN42a5+a6LUgPBw==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.7.1.tgz", + "integrity": "sha512-KtqfI3RitbsEbm22EhbooTvhjfMf6zttKlbND7LcyJwP3MEPVYyzQJuET03hk9z4SgCfNV2r/W3sYyejzzTMog==", "requires": { "nprogress": "^0.2.0" } }, "@vuepress/plugin-register-components": { - "version": "1.5.4", - "resolved": 
"https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.5.4.tgz", - "integrity": "sha512-Y1U9j6unZp1ZhnHjQ9yOPY+vxldUA3C1EwT6UgI75j5gxa5Hz6NakoIo6mbhaYHlGmx33o/MXrxufLPapo/YlQ==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.7.1.tgz", + "integrity": "sha512-MlFdH6l3rTCJlGMvyssXVG998cq5LSMzxCuQLYcRdtHQT4HbikIcV4HSPGarWInD1mP12+qX/PvKUawGwp1eVg==", "requires": { - "@vuepress/shared-utils": "1.5.4" + "@vuepress/shared-utils": "1.7.1" } }, "@vuepress/plugin-search": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.5.4.tgz", - "integrity": "sha512-wikU9XYiZ3Olbii0lI+56mcSdpzHHkduVBMB4MNEV5iob23qDxGPmvfZirjsZV20w1UnLRptERyHtZkTLW9Mbg==" + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.7.1.tgz", + "integrity": "sha512-OmiGM5eYg9c+uC50b6/cSxAhqxfD7AIui6JEztFGeECrlP33RLHmteXK9YBBZjp5wTNmoYs+NXI/cWggYUPW8Q==" }, "@vuepress/shared-utils": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.5.4.tgz", - "integrity": "sha512-HCeMPEAPjFN1Ongii0BUCI1iB4gBBiQ4PUgh7F4IGG8yBg4tMqWO4NHqCuDCuGEvK7lgHy8veto0SsSvdSKp3g==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.7.1.tgz", + "integrity": "sha512-ydB2ZKsFZE6hFRb9FWqzZksxAPIMJjtBawk50RP6F+YX5HbID/HlyYYZM9aDSbk6RTkjgB5UzJjggA2xM8POlw==", "requires": { "chalk": "^2.3.2", "escape-html": "^1.0.3", @@ -1628,16 +1618,16 @@ } }, "@vuepress/theme-default": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.5.4.tgz", - "integrity": "sha512-kHst1yXzqTiocVU7w9x4cfJ08vR9ZbREC6kTRtH1ytQSEUL5tM0b9HFicfg1kDp7YNq2qntRro+WmfjU9Ps/eg==", + "version": "1.7.1", + "resolved": 
"https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.7.1.tgz", + "integrity": "sha512-a9HeTrlcWQj3ONHiABmlN2z9TyIxKfQtLsA8AL+WgjN3PikhFuZFIJGzfr+NLt67Y9oiI+S9ZfiaVyvWM+7bWQ==", "requires": { - "@vuepress/plugin-active-header-links": "1.5.4", - "@vuepress/plugin-nprogress": "1.5.4", - "@vuepress/plugin-search": "1.5.4", + "@vuepress/plugin-active-header-links": "1.7.1", + "@vuepress/plugin-nprogress": "1.7.1", + "@vuepress/plugin-search": "1.7.1", "docsearch.js": "^2.5.2", "lodash": "^4.17.15", - "stylus": "^0.54.5", + "stylus": "^0.54.8", "stylus-loader": "^3.0.2", "vuepress-plugin-container": "^2.0.2", "vuepress-plugin-smooth-scroll": "^0.0.3" @@ -1850,9 +1840,9 @@ "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=" }, "ajv": { - "version": "6.12.5", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.5.tgz", - "integrity": "sha512-lRF8RORchjpKG50/WFf8xmg7sgCLFiYNNnqdKflk63whMQcWR5ngGjiSXkL9bjxy6B2npOK2HSMN49jEBMSkag==", + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -1871,24 +1861,24 @@ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" }, "algoliasearch": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.5.1.tgz", - "integrity": "sha512-b6yT1vWMlBdVObQipKxvt0M6SEvGetVj+FFFlo0Fy06gkdj6WCJaS4t10Q/hC3I2VG9QmpCqlK3Esgg1y1E+uw==", - "requires": { - "@algolia/cache-browser-local-storage": "4.5.1", - "@algolia/cache-common": "4.5.1", - "@algolia/cache-in-memory": "4.5.1", - "@algolia/client-account": "4.5.1", - "@algolia/client-analytics": "4.5.1", - "@algolia/client-common": "4.5.1", - "@algolia/client-recommendation": "4.5.1", - "@algolia/client-search": "4.5.1", - "@algolia/logger-common": "4.5.1", - 
"@algolia/logger-console": "4.5.1", - "@algolia/requester-browser-xhr": "4.5.1", - "@algolia/requester-common": "4.5.1", - "@algolia/requester-node-http": "4.5.1", - "@algolia/transporter": "4.5.1" + "version": "4.8.3", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.8.3.tgz", + "integrity": "sha512-pljX9jEE2TQ3i1JayhG8afNdE8UuJg3O9c7unW6QO67yRWCKr6b0t5aKC3hSVtjt7pA2TQXLKoAISb4SHx9ozQ==", + "requires": { + "@algolia/cache-browser-local-storage": "4.8.3", + "@algolia/cache-common": "4.8.3", + "@algolia/cache-in-memory": "4.8.3", + "@algolia/client-account": "4.8.3", + "@algolia/client-analytics": "4.8.3", + "@algolia/client-common": "4.8.3", + "@algolia/client-recommendation": "4.8.3", + "@algolia/client-search": "4.8.3", + "@algolia/logger-common": "4.8.3", + "@algolia/logger-console": "4.8.3", + "@algolia/requester-browser-xhr": "4.8.3", + "@algolia/requester-common": "4.8.3", + "@algolia/requester-node-http": "4.8.3", + "@algolia/transporter": "4.8.3" } }, "align-text": { @@ -2146,14 +2136,14 @@ "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" }, "aws4": { - "version": "1.10.1", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.10.1.tgz", - "integrity": "sha512-zg7Hz2k5lI8kb7U32998pRRFin7zJlkfezGJjUc2heaD4Pw2wObakCDVzkKztTm/Ln7eiVvYsjqak0Ed4LkMDA==" + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", + "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" }, "axios": { - "version": "0.20.0", - "resolved": "https://registry.npmjs.org/axios/-/axios-0.20.0.tgz", - "integrity": "sha512-ANA4rr2BDcmmAQLOKft2fufrtuvlqR+cXNNinUmvfeSNCOF98PZL+7M/v1zIdGo7OLjEA9J2gXJL+j4zGsl0bA==", + "version": "0.21.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.0.tgz", + "integrity": "sha512-fmkJBknJKoZwem3/IKSSLpkdNXZeBu5Q7GA/aRsr2btgrptmSCxi2oFjZHqGdK9DoTil9PIHlPIZw2EcRJXRvw==", "requires": { "follow-redirects": "^1.10.0" }, @@ -2166,25 
+2156,14 @@ } }, "babel-loader": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.1.0.tgz", - "integrity": "sha512-7q7nC1tYOrqvUrN3LQK4GwSk/TQorZSOlO9C+RZDZpODgyN4ZlCqE5q9cDsyWOliN+aU9B4JX01xK9eJXowJLw==", + "version": "8.2.2", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.2.tgz", + "integrity": "sha512-JvTd0/D889PQBtUXJ2PXaKU/pjZDMtHA9V2ecm+eNRmmBCMR09a+fmpGTNwnJtFmFl5Ei7Vy47LjBb+L0wQ99g==", "requires": { - "find-cache-dir": "^2.1.0", + "find-cache-dir": "^3.3.1", "loader-utils": "^1.4.0", - "mkdirp": "^0.5.3", - "pify": "^4.0.1", + "make-dir": "^3.1.0", "schema-utils": "^2.6.5" - }, - "dependencies": { - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } - } } }, "babel-plugin-dynamic-import-node": { @@ -2276,9 +2255,9 @@ } }, "base64-js": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", - "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==" + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" }, "batch": { "version": "0.6.1", @@ -2393,11 +2372,10 @@ "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" }, "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": 
"sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "requires": { - "@types/color-name": "^1.1.1", "color-convert": "^2.0.1" } }, @@ -2542,19 +2520,12 @@ } }, "browserify-rsa": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.0.1.tgz", - "integrity": "sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ=", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz", + "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==", "requires": { - "bn.js": "^4.1.0", + "bn.js": "^5.0.0", "randombytes": "^2.0.1" - }, - "dependencies": { - "bn.js": { - "version": "4.11.9", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", - "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" - } } }, "browserify-sign": { @@ -2582,14 +2553,15 @@ } }, "browserslist": { - "version": "4.14.4", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.14.4.tgz", - "integrity": "sha512-7FOuawafVdEwa5Jv4nzeik/PepAjVte6HmVGHsjt2bC237jeL9QlcTBDF3PnHEvcC6uHwLGYPwZHNZMB7wWAnw==", + "version": "4.16.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.0.tgz", + "integrity": "sha512-/j6k8R0p3nxOC6kx5JGAxsnhc9ixaWJfYc+TNTzxg6+ARaESAvQGV7h0uNOB4t+pLQJZWzcrMxXOxjgsCj3dqQ==", "requires": { - "caniuse-lite": "^1.0.30001135", - "electron-to-chromium": "^1.3.570", - "escalade": "^3.1.0", - "node-releases": "^1.1.61" + "caniuse-lite": "^1.0.30001165", + "colorette": "^1.2.1", + "electron-to-chromium": "^1.3.621", + "escalade": "^3.1.1", + "node-releases": "^1.1.67" } }, "buffer": { @@ -2698,6 +2670,42 @@ "schema-utils": "^1.0.0" }, "dependencies": { + "find-cache-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", + "integrity": 
"sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "requires": { + "commondir": "^1.0.1", + "make-dir": "^2.0.0", + "pkg-dir": "^3.0.0" + } + }, + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + } + }, "mkdirp": { "version": "0.5.5", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", @@ -2706,6 +2714,27 @@ "minimist": "^1.2.5" } }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + }, + "pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "requires": { + "find-up": "^3.0.0" + } + }, "schema-utils": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", @@ -2715,6 +2744,11 @@ "ajv-errors": "^1.0.0", "ajv-keywords": "^3.1.0" } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" } } }, @@ -2752,6 +2786,15 @@ } } }, + "call-bind": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.0.tgz", + "integrity": "sha512-AEXsYIyyDY3MCzbwdhzG3Jx1R0J2wetQyUynn6dYHAO+bg8l1k7jwZtRv4ryryFs7EP+NDlikJlVe59jr0cM2w==", + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.0" + } + }, "call-me-maybe": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", @@ -2804,9 +2847,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001135", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001135.tgz", - "integrity": "sha512-ziNcheTGTHlu9g34EVoHQdIu5g4foc8EsxMGC7Xkokmvw0dqNtX8BS8RgCgFBaAiSp2IdjvBxNdh0ssib28eVQ==" + "version": "1.0.30001165", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001165.tgz", + "integrity": "sha512-8cEsSMwXfx7lWSUMA2s08z9dIgsnR5NAqjXP23stdsU3AUWkCr/rr4s4OFtHXn5XXr6+7kam3QFVoYyXNPdJPA==" }, "caseless": { "version": "0.12.0", @@ -2952,9 +2995,9 @@ } }, "clipboard-copy": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.1.0.tgz", - "integrity": "sha512-Xsu1NddBXB89IUauda5BIq3Zq73UWkjkaQlPQbLNvNsd5WBMnTWPNKYR6HGaySOxGYZ+BKxP2E9X4ElnI3yiPA==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", + "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==" }, "cliui": { "version": "2.1.0", @@ -2994,12 +3037,12 @@ } }, "color": { - "version": "3.1.2", - "resolved": 
"https://registry.npmjs.org/color/-/color-3.1.2.tgz", - "integrity": "sha512-vXTJhHebByxZn3lDvDJYw4lR5+uB3vuoHsuYA5AKuxRVn5wzzIfQKGLBmgdVRHKTJYeK5rvJcHnrd0Li49CFpg==", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz", + "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==", "requires": { "color-convert": "^1.9.1", - "color-string": "^1.5.2" + "color-string": "^1.5.4" } }, "color-convert": { @@ -3016,9 +3059,9 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, "color-string": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.4.tgz", + "integrity": "sha512-57yF5yt8Xa3czSEW1jfQDE79Idk0+AkN/4KWad6tbdxUmAs3MvjxlWSWD4deYytcRfoZ9nhKyFl1kj5tBvidbw==", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -3145,16 +3188,6 @@ "unique-string": "^2.0.0", "write-file-atomic": "^3.0.0", "xdg-basedir": "^4.0.0" - }, - "dependencies": { - "make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "requires": { - "semver": "^6.0.0" - } - } } }, "connect-history-api-fallback": { @@ -3288,6 +3321,24 @@ "webpack-log": "^2.0.0" }, "dependencies": { + "find-cache-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", + "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "requires": { + "commondir": "^1.0.1", + "make-dir": "^2.0.0", + "pkg-dir": "^3.0.0" + } + }, + "find-up": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "requires": { + "locate-path": "^3.0.0" + } + }, "globby": { "version": "7.1.1", "resolved": "https://registry.npmjs.org/globby/-/globby-7.1.1.tgz", @@ -3299,6 +3350,13 @@ "ignore": "^3.3.5", "pify": "^3.0.0", "slash": "^1.0.0" + }, + "dependencies": { + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=" + } } }, "ignore": { @@ -3306,10 +3364,44 @@ "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" }, - "pify": { + "locate-path": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", - "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=" + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + }, + "pkg-dir": { 
+ "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "requires": { + "find-up": "^3.0.0" + } }, "schema-utils": { "version": "1.0.0", @@ -3321,6 +3413,11 @@ "ajv-keywords": "^3.1.0" } }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + }, "slash": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", @@ -3329,16 +3426,16 @@ } }, "core-js": { - "version": "2.6.11", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz", - "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==" + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.12.tgz", + "integrity": "sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ==" }, "core-js-compat": { - "version": "3.6.5", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.6.5.tgz", - "integrity": "sha512-7ItTKOhOZbznhXAQ2g/slGg1PJV5zDO/WdkTwi7UEOJmkvsE32PWvx6mKtDjiMpjnR2CNf6BAD6sSxIlv7ptng==", + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.8.1.tgz", + "integrity": "sha512-a16TLmy9NVD1rkjUGbwuyWkiDoN0FDpAwrfLONvHFQx0D9k7J9y0srwMT8QP/Z6HE3MIFaVynEeYwZwPX1o5RQ==", "requires": { - "browserslist": "^4.8.5", + "browserslist": "^4.15.0", "semver": "7.0.0" }, "dependencies": { @@ -3628,26 +3725,26 @@ "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==" }, "csso": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.0.3.tgz", - "integrity": 
"sha512-NL3spysxUkcrOgnpsT4Xdl2aiEiBG6bXswAABQVHcMrfjjBisFOKwLDOmf4wf32aPdcJws1zds2B0Rg+jqMyHQ==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz", + "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==", "requires": { - "css-tree": "1.0.0-alpha.39" + "css-tree": "^1.1.2" }, "dependencies": { "css-tree": { - "version": "1.0.0-alpha.39", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.39.tgz", - "integrity": "sha512-7UvkEYgBAHRG9Nt980lYxjsTrCyHFN53ky3wVsDkiMdVqylqRt+Zc+jm5qw7/qyOvN2dHSYtX0e4MbCCExSvnA==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.2.tgz", + "integrity": "sha512-wCoWush5Aeo48GLhfHPbmvZs59Z+M7k5+B1xDnXbdWNcEF423DoFdqSWE0PM5aNk5nI5cp1q7ms36zGApY/sKQ==", "requires": { - "mdn-data": "2.0.6", + "mdn-data": "2.0.14", "source-map": "^0.6.1" } }, "mdn-data": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.6.tgz", - "integrity": "sha512-rQvjv71olwNHgiTbfPZFkJtjNMciWgswYeciZhtvWLO8bmX3TnhyA62I6sTWOyZssWHJJjY6/KiWwqQsWWsqOA==" + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" } } }, @@ -4084,9 +4181,9 @@ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" }, "electron-to-chromium": { - "version": "1.3.571", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.571.tgz", - "integrity": "sha512-UYEQ2Gtc50kqmyOmOVtj6Oqi38lm5yRJY3pLuWt6UIot0No1L09uu6Ja6/1XKwmz/p0eJFZTUZi+khd1PV1hHA==" + "version": "1.3.621", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.621.tgz", + "integrity": "sha512-FeIuBzArONbAmKmZIsZIFGu/Gc9AVGlVeVbhCq+G2YIl6QkT0TDn2HKN/FMf1btXEB9kEmIuQf3/lBTVAbmFOg==" }, "elliptic": { "version": "6.5.3", @@ -4181,9 
+4278,9 @@ } }, "entities": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.2.tgz", - "integrity": "sha512-dmD3AvJQBUjKpcNkoqr+x+IF0SdRtPz9Vk0uTy4yWqga9ibB6s4v++QFWNohjiUGoMlF552ZvNyXDxz5iW0qmw==" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", + "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==" }, "envify": { "version": "4.1.0", @@ -4216,20 +4313,20 @@ } }, "es-abstract": { - "version": "1.18.0-next.0", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.0.tgz", - "integrity": "sha512-elZXTZXKn51hUBdJjSZGYRujuzilgXo8vSPQzjGYXLvSlGiCo8VO8ZGV3kjo9a0WNJJ57hENagwbtlRuHuzkcQ==", + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", "requires": { "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", "has": "^1.0.3", "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", + "is-callable": "^1.2.2", "is-negative-zero": "^2.0.0", "is-regex": "^1.1.1", "object-inspect": "^1.8.0", "object-keys": "^1.1.1", - "object.assign": "^4.1.0", + "object.assign": "^4.1.1", "string.prototype.trimend": "^1.0.1", "string.prototype.trimstart": "^1.0.1" } @@ -4250,9 +4347,9 @@ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" }, "escalade": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.0.tgz", - "integrity": "sha512-mAk+hPSO8fLDkhV7V0dXazH5pDc6MrjBTPyD3VeKzxnVFjH1MIxbCdqGZB9O8+EwWakZs3ZCbDS4IpRt79V1ig==" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" }, "escape-goat": { "version": 
"2.1.1", @@ -4624,21 +4721,22 @@ } }, "find-cache-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", - "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz", + "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==", "requires": { "commondir": "^1.0.1", - "make-dir": "^2.0.0", - "pkg-dir": "^3.0.0" + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" } }, "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "requires": { - "locate-path": "^3.0.0" + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" } }, "flush-write-stream": { @@ -4839,20 +4937,30 @@ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" }, "fuse.js": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-6.4.1.tgz", - "integrity": "sha512-+hAS7KYgLXontDh/vqffs7wIBw0ceb9Sx8ywZQhOsiQGcSO5zInGhttWOUYQYlvV/yYMJOacQ129Xs3mP3+oZQ==" + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz", + "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==" }, "gensync": { - "version": "1.0.0-beta.1", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.1.tgz", - "integrity": "sha512-r8EC6NO1sngH/zdD9fiRDLdcgnbayXah+mLgManTaIZJqEC1MZstmnox8KpnI2/fxQwrp5OpCOYWLp4rBl4Jcg==" + 
"version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==" }, "get-caller-file": { "version": "2.0.5", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" }, + "get-intrinsic": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.0.1.tgz", + "integrity": "sha512-ZnWP+AmS1VUaLgTRy47+zKtjTxz+0xMpx3I52i+aalBK1QP19ggLF3Db89KJX7kjfOfP2eoa01qc++GwPgufPg==", + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1" + } + }, "get-stream": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", @@ -5339,9 +5447,9 @@ } }, "ieee754": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", - "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" }, "iferr": { "version": "0.1.5", @@ -5395,6 +5503,46 @@ "requires": { "pkg-dir": "^3.0.0", "resolve-cwd": "^2.0.0" + }, + "dependencies": { + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": 
"sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + }, + "pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "requires": { + "find-up": "^3.0.0" + } + } } }, "imurmurhash": { @@ -5427,9 +5575,9 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, "ini": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", - "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==" + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", + "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==" }, "internal-ip": { "version": "4.3.0", @@ -5440,14 +5588,6 @@ "ipaddr.js": "^1.9.0" } }, - "invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "requires": { - "loose-envify": "^1.0.0" - } - }, "ip": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", @@ -5487,9 +5627,12 @@ } }, "is-arguments": { - "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/is-arguments/-/is-arguments-1.0.4.tgz", - "integrity": "sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.0.tgz", + "integrity": "sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg==", + "requires": { + "call-bind": "^1.0.0" + } }, "is-arrayish": { "version": "0.2.1", @@ -5542,6 +5685,14 @@ "rgba-regex": "^1.0.0" } }, + "is-core-module": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.2.0.tgz", + "integrity": "sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ==", + "requires": { + "has": "^1.0.3" + } + }, "is-data-descriptor": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", @@ -5643,9 +5794,9 @@ } }, "is-negative-zero": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.0.tgz", - "integrity": "sha1-lVOxIbD6wohp2p7UWeIMdUN4hGE=" + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz", + "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==" }, "is-npm": { "version": "4.0.0", @@ -5809,9 +5960,9 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "js-yaml": { - "version": "3.14.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", - "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", 
"requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -5951,19 +6102,6 @@ "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-1.0.4.tgz", "integrity": "sha1-odePw6UEdMuAhF07O24dpJpEbo4=" }, - "leven": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==" - }, - "levenary": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/levenary/-/levenary-1.1.1.tgz", - "integrity": "sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ==", - "requires": { - "leven": "^3.1.0" - } - }, "linkify-it": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", @@ -5993,12 +6131,11 @@ } }, "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" + "p-locate": "^4.1.0" } }, "lodash": { @@ -6069,23 +6206,15 @@ "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" }, "loglevel": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.0.tgz", - "integrity": "sha512-i2sY04nal5jDcagM3FMfG++T69GEEM8CYuOfeOIvmXzOIcwE9a/CJPR0MFM97pYMj/u10lzz7/zd7+qwhrBTqQ==" + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz", + "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw==" }, "longest": { "version": "1.0.1", "resolved": 
"https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", "integrity": "sha1-MKCy2jj3N3DoKUoNIuZiXtd9AJc=" }, - "loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "requires": { - "js-tokens": "^3.0.0 || ^4.0.0" - } - }, "lower-case": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz", @@ -6105,19 +6234,11 @@ } }, "make-dir": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", - "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", "requires": { - "pify": "^4.0.1", - "semver": "^5.6.0" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } + "semver": "^6.0.0" } }, "map-cache": { @@ -6134,17 +6255,22 @@ } }, "markdown-it": { - "version": "11.0.1", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-11.0.1.tgz", - "integrity": "sha512-aU1TzmBKcWNNYvH9pjq6u92BML+Hz3h5S/QpfTFwiQF852pLT+9qHsrhM9JYipkOXZxGn+sGH8oyJE9FD9WezQ==", + "version": "12.0.3", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.3.tgz", + "integrity": "sha512-M57RsMv+QQmJHz1yCu0gTJRMx/LlxRPtrrw+2kb/CpDVK/graCmWO0qfNnz/SE1FCNdyq3pkMMZ+itTnyT/YGA==", "requires": { - "argparse": "^1.0.7", - "entities": "~2.0.0", + "argparse": "^2.0.1", + "entities": "~2.1.0", "linkify-it": "^3.0.1", "mdurl": "^1.0.1", "uc.micro": "^1.0.5" }, 
"dependencies": { + "argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, "linkify-it": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz", @@ -6501,9 +6627,9 @@ "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=" }, "nan": { - "version": "2.14.1", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.1.tgz", - "integrity": "sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw==", + "version": "2.14.2", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", + "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", "optional": true }, "nanomatch": { @@ -6638,9 +6764,9 @@ } }, "node-releases": { - "version": "1.1.61", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.61.tgz", - "integrity": "sha512-DD5vebQLg8jLCOzwupn954fbIiZht05DAZs0k2u8NStSe6h9XdsuIQL8hSRKYiU8WUQRznmSDrKGbv3ObOmC7g==" + "version": "1.1.67", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.67.tgz", + "integrity": "sha512-V5QF9noGFl3EymEwUYzO+3NTDpGfQB4ve6Qfnzf3UNydMhjQRVPR1DZTuvWiLzaFJYw2fmDwAfnRNEVb64hSIg==" }, "nopt": { "version": "1.0.10", @@ -6735,37 +6861,17 @@ } }, "object-inspect": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.8.0.tgz", - "integrity": "sha512-jLdtEOB112fORuypAyl/50VRVIBIdVQOSUUGQHzJ4xBSbit81zRarz7GThkEFZy1RceYrWYcPcBFPQwHyAc1gA==" + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.9.0.tgz", + "integrity": "sha512-i3Bp9iTqwhaLZBxGkRfo5ZbE07BQRT7MGu8+nNgwW9ItGp1TzCTw2DLEoWwjClxBjOFI/hWljTAmYGCEwmtnOw==" }, "object-is": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/object-is/-/object-is-1.1.2.tgz", - "integrity": "sha512-5lHCz+0uufF6wZ7CRFWJN3hp8Jqblpgve06U5CMQ3f//6iDjPr2PEo9MWCjEssDsa+UZEL4PkFpr+BMop6aKzQ==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.4.tgz", + "integrity": "sha512-1ZvAZ4wlF7IyPVOcE1Omikt7UpaFlOQq0HlSti+ZvDH3UiD2brwGMwDbyV43jao2bKJ+4+WdPJHSd7kgzKYVqg==", "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - }, - "dependencies": { - "es-abstract": { - "version": "1.17.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", - "integrity": "sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", - "is-regex": "^1.1.0", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimend": "^1.0.1", - "string.prototype.trimstart": "^1.0.1" - } - } + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" } }, "object-keys": { @@ -6782,43 +6888,24 @@ } }, "object.assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.1.tgz", - "integrity": "sha512-VT/cxmx5yaoHSOTSyrCygIDFco+RsibY2NM0a4RdEeY/4KgqezwFtK1yr3U67xYhqJSlASm2pKhLVzPj2lr4bA==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", "requires": { + "call-bind": "^1.0.0", "define-properties": "^1.1.3", - "es-abstract": "^1.18.0-next.0", "has-symbols": "^1.0.1", "object-keys": "^1.1.1" } }, "object.getownpropertydescriptors": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.0.tgz", - "integrity": 
"sha512-Z53Oah9A3TdLoblT7VKJaTDdXdT+lQO+cNpKVnya5JDe9uLvzu1YyY1yFDFrcxrlRgWrEFH0jJtD/IbuwjcEVg==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.1.tgz", + "integrity": "sha512-6DtXgZ/lIZ9hqx4GtZETobXLR/ZLaa0aqV0kzbn80Rf8Z2e/XFnhA0I7p07N2wH8bBBltr2xQPi6sbKWAY2Eng==", "requires": { + "call-bind": "^1.0.0", "define-properties": "^1.1.3", - "es-abstract": "^1.17.0-next.1" - }, - "dependencies": { - "es-abstract": { - "version": "1.17.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", - "integrity": "sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", - "is-regex": "^1.1.0", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimend": "^1.0.1", - "string.prototype.trimstart": "^1.0.1" - } - } + "es-abstract": "^1.18.0-next.1" } }, "object.pick": { @@ -6830,34 +6917,14 @@ } }, "object.values": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.1.tgz", - "integrity": "sha512-WTa54g2K8iu0kmS/us18jEmdv1a4Wi//BZ/DTVYEcH0XhLM5NYdpDHja3gt57VrZLcNAO2WGA+KpWsDBaHt6eA==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.2.tgz", + "integrity": "sha512-MYC0jvJopr8EK6dPBiO8Nb9mvjdypOachO5REGk6MXzujbBrAisKo3HmdEI6kZDL6fC31Mwee/5YbtMebixeag==", "requires": { + "call-bind": "^1.0.0", "define-properties": "^1.1.3", - "es-abstract": "^1.17.0-next.1", - "function-bind": "^1.1.1", + "es-abstract": "^1.18.0-next.1", "has": "^1.0.3" - }, - "dependencies": { - "es-abstract": { - "version": "1.17.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", - "integrity": 
"sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", - "is-regex": "^1.1.0", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimend": "^1.0.1", - "string.prototype.trimstart": "^1.0.1" - } - } } }, "obuf": { @@ -6940,11 +7007,11 @@ } }, "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "requires": { - "p-limit": "^2.0.0" + "p-limit": "^2.2.0" } }, "p-map": { @@ -7078,9 +7145,9 @@ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=" }, "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==" }, "path-is-absolute": { "version": "1.0.1", @@ -7164,11 +7231,11 @@ } }, "pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", "requires": { - "find-up": "^3.0.0" + 
"find-up": "^4.0.0" } }, "portfinder": { @@ -7182,9 +7249,9 @@ }, "dependencies": { "debug": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } @@ -7198,9 +7265,9 @@ } }, "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" } } }, @@ -7210,9 +7277,9 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.34", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.34.tgz", - "integrity": "sha512-H/7V2VeNScX9KE83GDrDZNiGT1m2H+UTnlinIzhjlLX9hfMUn1mHNnGeX81a1c8JSBdBvqk7c2ZOG6ZPn5itGw==", + "version": "7.0.35", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", + "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", "requires": { "chalk": "^2.4.2", "source-map": "^0.6.1", @@ -7230,9 +7297,9 @@ } }, "postcss-calc": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.4.tgz", - "integrity": "sha512-0I79VRAd1UTkaHzY9w83P39YGO/M3bG7/tNLrHGEunBolfoGM0hSjrGvjoeaj0JE/zIw5GsI2KZ0UwDJqv5hjw==", + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", + "integrity": 
"sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", "requires": { "postcss": "^7.0.27", "postcss-selector-parser": "^6.0.2", @@ -7307,9 +7374,9 @@ } }, "postcss-load-config": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.1.tgz", - "integrity": "sha512-D2ENobdoZsW0+BHy4x1CAkXtbXtYWYRIxL/JbtRBqrRGOPtJ2zoga/bEZWhV/ShWB5saVxJMzbMdSyA/vv4tXw==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz", + "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==", "requires": { "cosmiconfig": "^5.0.0", "import-cwd": "^2.0.0" @@ -7707,9 +7774,9 @@ } }, "postcss-selector-parser": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.3.tgz", - "integrity": "sha512-0ClFaY4X1ra21LRqbW6y3rUbWcxnSVkDFG57R7Nxus9J9myPFlv+jYDMohzpkBx0RrjjiqjtycpchQ+PLGmZ9w==", + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz", + "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==", "requires": { "cssesc": "^3.0.0", "indexes-of": "^1.0.1", @@ -7762,12 +7829,12 @@ "optional": true }, "pretty-error": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.1.tgz", - "integrity": "sha1-X0+HyPkeWuPzuoerTPXgOxoX8aM=", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.2.tgz", + "integrity": "sha512-EY5oDzmsX5wvuynAByrmY0P0hcp+QpnAKbJng2A2MPjVKXCxrDSUkzghVJ4ZGPIv+JC4gX8fPUWscC0RtjsWGw==", "requires": { - "renderkid": "^2.0.1", - "utila": "~0.4" + "lodash": "^4.17.20", + "renderkid": "^2.0.4" } }, "pretty-time": { @@ -7776,9 +7843,9 @@ "integrity": 
"sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" }, "prismjs": { - "version": "1.21.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.21.0.tgz", - "integrity": "sha512-uGdSIu1nk3kej2iZsLyDoJ7e9bnPzIgY0naW/HdknGj61zScaprVEVGHrPoXqI+M9sP0NDnTK2jpkvmldpuqDw==", + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.22.0.tgz", + "integrity": "sha512-lLJ/Wt9yy0AiSYBf212kK3mM5L8ycwlyTlSxHBAneXLR0nzFMlZ5y7riFPF3E33zXOF2IH95xdY5jIyZbM9z/w==", "requires": { "clipboard": "^2.0.0" } @@ -7947,9 +8014,9 @@ } }, "pug-plain-loader": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/pug-plain-loader/-/pug-plain-loader-1.0.0.tgz", - "integrity": "sha512-mDfq/qvJJ0xdug38mZ1ObW0BQTx9kAHnKqotXC+C00XQkKmsWaMe90JUg/kN4lS6MU7tpVsMZ+rmcnBSPfDtHA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pug-plain-loader/-/pug-plain-loader-1.1.0.tgz", + "integrity": "sha512-1nYgIJLaahRuHJHhzSPODV44aZfb00bO7kiJiMkke6Hj4SVZftuvx6shZ4BOokk50dJc2RSFqNUBOlus0dniFQ==", "requires": { "loader-utils": "^1.1.0" } @@ -8008,9 +8075,9 @@ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, "pupa": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.0.1.tgz", - "integrity": "sha512-hEJH0s8PXLY/cdXh66tNEQGndDrIKNqNC5xmrysZy3i5C3oEoLna7YAOad+7u125+zH1HNXUmGEkrhb3c2VriA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", + "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", "requires": { "escape-goat": "^2.0.0" } @@ -8159,9 +8226,9 @@ } }, "regenerate": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.1.tgz", - "integrity": "sha512-j2+C8+NtXQgEKWk49MMP5P/u2GhnahTtVkRIHr5R5lVRlbKvmQ+oS+A5aLKWp2ma5VkT8sh6v+v4hbH0YHR66A==" + "version": "1.4.2", + "resolved": 
"https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" }, "regenerate-unicode-properties": { "version": "8.2.0", @@ -8222,19 +8289,19 @@ }, "dependencies": { "es-abstract": { - "version": "1.17.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", - "integrity": "sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", + "version": "1.17.7", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.7.tgz", + "integrity": "sha512-VBl/gnfcJ7OercKA9MVaegWsBHFjV492syMudcnQZvt/Dw8ezpcOHYZXa/J96O8vx+g4x65YKhxOwDUh63aS5g==", "requires": { "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", "has": "^1.0.3", "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", - "is-regex": "^1.1.0", - "object-inspect": "^1.7.0", + "is-callable": "^1.2.2", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", "object-keys": "^1.1.1", - "object.assign": "^4.1.0", + "object.assign": "^4.1.1", "string.prototype.trimend": "^1.0.1", "string.prototype.trimstart": "^1.0.1" } @@ -8255,9 +8322,9 @@ } }, "registry-auth-token": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.0.tgz", - "integrity": "sha512-P+lWzPrsgfN+UEpDS3U8AQKg/UjZX6mQSJueZj3EK+vNESoqBSpBUD3gmu4sF9lOsjXWjF11dQKUqemf3veq1w==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz", + "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==", "requires": { "rc": "^1.2.8" } @@ -8301,15 +8368,15 @@ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" }, "renderkid": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.3.tgz", - "integrity": 
"sha512-z8CLQp7EZBPCwCnncgf9C4XAi3WR0dv+uWu/PjIyhhAb5d6IJ/QZqlHFprHeKT+59//V6BNUsLbvN8+2LarxGA==", + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.4.tgz", + "integrity": "sha512-K2eXrSOJdq+HuKzlcjOlGoOarUu5SDguDEhE7+Ah4zuOWL40j8A/oHvLlLob9PSTNvVnBd+/q0Er1QfpEuem5g==", "requires": { "css-select": "^1.1.0", "dom-converter": "^0.2", "htmlparser2": "^3.3.0", - "strip-ansi": "^3.0.0", - "utila": "^0.4.0" + "lodash": "^4.17.20", + "strip-ansi": "^3.0.0" } }, "repeat-element": { @@ -8372,10 +8439,11 @@ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" }, "resolve": { - "version": "1.17.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.17.0.tgz", - "integrity": "sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==", + "version": "1.19.0", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.19.0.tgz", + "integrity": "sha512-rArEXAgsBG4UgRGcynxWIWKFvh/XZCcS8UJdHhwy91zwAvCZIbcs+vAbflgBnNjYMs/i/i+/Ux6IZhML1yPvxg==", "requires": { + "is-core-module": "^2.1.0", "path-parse": "^1.0.6" } }, @@ -8858,9 +8926,9 @@ }, "dependencies": { "debug": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", "requires": { "ms": "^2.1.1" } @@ -8874,9 +8942,9 @@ } }, "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" } } }, @@ -8937,9 +9005,9 @@ }, "dependencies": { "debug": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", - "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { "ms": "2.1.2" } @@ -8965,9 +9033,9 @@ }, "dependencies": { "debug": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", - "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { "ms": "2.1.2" } @@ -9041,9 +9109,19 @@ "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==" }, "stack-utils": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.2.tgz", - "integrity": "sha512-MTX+MeG5U994cazkjd/9KNAapsHnibjMLnfXodlkXw76JEea0UiNzrqidzo1emMwk7w5Qhc9jd4Bn9TBb1MFwA==" + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.4.tgz", + "integrity": "sha512-IPDJfugEGbfizBwBZRZ3xpccMdRyP5lqsBWXGQWimVjua/ccLCeMOAVjlc1R7LxFjo5sEDhyNIXd8mo/AiDS9w==", + "requires": { + "escape-string-regexp": "^2.0.0" + }, + "dependencies": { + "escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": 
"sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==" + } + } }, "static-extend": { "version": "0.1.2", @@ -9201,61 +9279,21 @@ } }, "string.prototype.trimend": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.1.tgz", - "integrity": "sha512-LRPxFUaTtpqYsTeNKaFOw3R4bxIzWOnbQ837QfBylo8jIxtcbK/A/sMV7Q+OAV/vWo+7s25pOE10KYSjaSO06g==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.3.tgz", + "integrity": "sha512-ayH0pB+uf0U28CtjlLvL7NaohvR1amUvVZk+y3DYb0Ey2PUV5zPkkKy9+U1ndVEIXO8hNg18eIv9Jntbii+dKw==", "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - }, - "dependencies": { - "es-abstract": { - "version": "1.17.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", - "integrity": "sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", - "is-regex": "^1.1.0", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimend": "^1.0.1", - "string.prototype.trimstart": "^1.0.1" - } - } + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" } }, "string.prototype.trimstart": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.1.tgz", - "integrity": "sha512-XxZn+QpvrBI1FOcg6dIpxUPgWCPuNXvMD72aaRaUQv1eD4e/Qy8i/hFTe0BUmD60p/QA6bh1avmuPTfNjqVWRw==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.3.tgz", + "integrity": "sha512-oBIBUy5lea5tt0ovtOFiEQaBkoBBkyJhZXzJYrSmDo5IUUqbOPvVezuRs/agBIdZ2p2Eo1FD6bD9USyBLfl3xg==", "requires": { - "define-properties": 
"^1.1.3", - "es-abstract": "^1.17.5" - }, - "dependencies": { - "es-abstract": { - "version": "1.17.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", - "integrity": "sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", - "requires": { - "es-to-primitive": "^1.2.1", - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", - "is-regex": "^1.1.0", - "object-inspect": "^1.7.0", - "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimend": "^1.0.1", - "string.prototype.trimstart": "^1.0.1" - } - } + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" } }, "string_decoder": { @@ -9388,9 +9426,9 @@ } }, "css-what": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.3.0.tgz", - "integrity": "sha512-pv9JPyatiPaQ6pf4OvD/dbfm0o5LviWmwxNWzblYf/1u9QZd0ihV+PMwy5jdQWQ3349kZmKEx9WXuSka2dM4cg==" + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", + "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==" }, "domutils": { "version": "1.7.0", @@ -9417,9 +9455,9 @@ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==" }, "term-size": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.0.tgz", - "integrity": "sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw==" + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz", + "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==" }, "terser": { "version": "4.8.0", @@ -9454,6 +9492,63 @@ "worker-farm": "^1.7.0" }, "dependencies": { + "find-cache-dir": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", + "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==", + "requires": { + "commondir": "^1.0.1", + "make-dir": "^2.0.0", + "pkg-dir": "^3.0.0" + } + }, + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "requires": { + "locate-path": "^3.0.0" + } + }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, + "make-dir": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz", + "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==", + "requires": { + "pify": "^4.0.1", + "semver": "^5.6.0" + } + }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + }, + "pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", + "requires": { + "find-up": "^3.0.0" + } + }, "schema-utils": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", @@ 
-9463,6 +9558,11 @@ "ajv-errors": "^1.0.0", "ajv-keywords": "^3.1.0" } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" } } }, @@ -9520,9 +9620,9 @@ "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" }, "timers-browserify": { - "version": "2.0.11", - "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.11.tgz", - "integrity": "sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==", + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", + "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", "requires": { "setimmediate": "^1.0.4" } @@ -9658,9 +9758,9 @@ } }, "tslib": { - "version": "1.13.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.13.0.tgz", - "integrity": "sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q==" + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" }, "tty-browserify": { "version": "0.0.0", @@ -9861,9 +9961,9 @@ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==" }, "update-notifier": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.1.tgz", - "integrity": "sha512-9y+Kds0+LoLG6yN802wVXoIfxYEwh3FlZwzMwpCZp62S2i1/Jzeqb9Eeeju3NSHccGGasfGlK5/vEHbAifYRDg==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.3.tgz", + "integrity": 
"sha512-Yld6Z0RyCYGB6ckIjffGOSOmHXj1gMeE7aROz4MG+XMkmixBX4jUngrGXNYz7wPKBmtoD4MnBa2Anu7RSKht/A==", "requires": { "boxen": "^4.2.0", "chalk": "^3.0.0", @@ -9881,11 +9981,10 @@ }, "dependencies": { "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "requires": { - "@types/color-name": "^1.1.1", "color-convert": "^2.0.1" } }, @@ -10036,19 +10135,19 @@ }, "dependencies": { "es-abstract": { - "version": "1.17.6", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", - "integrity": "sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", + "version": "1.17.7", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.7.tgz", + "integrity": "sha512-VBl/gnfcJ7OercKA9MVaegWsBHFjV492syMudcnQZvt/Dw8ezpcOHYZXa/J96O8vx+g4x65YKhxOwDUh63aS5g==", "requires": { "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", "has": "^1.0.3", "has-symbols": "^1.0.1", - "is-callable": "^1.2.0", - "is-regex": "^1.1.0", - "object-inspect": "^1.7.0", + "is-callable": "^1.2.2", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", "object-keys": "^1.1.1", - "object.assign": "^4.1.0", + "object.assign": "^4.1.1", "string.prototype.trimend": "^1.0.1", "string.prototype.trimstart": "^1.0.1" } @@ -10116,9 +10215,9 @@ "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" }, "vue-loader": { - "version": "15.9.3", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.3.tgz", - "integrity": 
"sha512-Y67VnGGgVLH5Voostx8JBZgPQTlDQeOVBLOEsjc2cXbCYBKexSKEpOA56x0YZofoDOTszrLnIShyOX1p9uCEHA==", + "version": "15.9.5", + "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.5.tgz", + "integrity": "sha512-oeMOs2b5o5gRqkxfds10bCx6JeXYTwivRgbb8hzOrcThD2z1+GqEKE3EX9A2SGbsYDf4rXwRg6D5n1w0jO5SwA==", "requires": { "@vue/component-compiler-utils": "^3.1.0", "hash-sum": "^1.0.2", @@ -10128,9 +10227,9 @@ } }, "vue-router": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.4.3.tgz", - "integrity": "sha512-BADg1mjGWX18Dpmy6bOGzGNnk7B/ZA0RxuA6qedY/YJwirMfKXIDzcccmHbQI0A6k5PzMdMloc0ElHfyOoX35A==" + "version": "3.4.9", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.4.9.tgz", + "integrity": "sha512-CGAKWN44RqXW06oC+u4mPgHLQQi2t6vLD/JbGRDAXm0YpMv0bgpKuU5bBd7AvMgfTz9kXVRIWKHqRwGEb8xFkA==" }, "vue-server-renderer": { "version": "2.6.12", @@ -10208,12 +10307,12 @@ "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" }, "vuepress": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.5.4.tgz", - "integrity": "sha512-F25r65BzxDFAJmWIN9s9sQSndLIf1ldAKEwkeXCqE4p2lsx/eVvQJL3DzOeeR2WgCFOkhFMKWIV+CthTGdNTZg==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.7.1.tgz", + "integrity": "sha512-AdA3do1L4DNzeF8sMTE+cSUJ5hR/6f3YujU8DVowi/vFOg/SX2lJF8urvDkZUSXzaAT6aSgkI9L+B6D+i7SJjA==", "requires": { - "@vuepress/core": "1.5.4", - "@vuepress/theme-default": "1.5.4", + "@vuepress/core": "1.7.1", + "@vuepress/theme-default": "1.7.1", "cac": "^6.5.6", "envinfo": "^7.2.0", "opencollective-postinstall": "^2.0.2", @@ -10280,6 +10379,11 @@ "markdown-it-container": "^2.0.0" } }, + "vuepress-plugin-google-tag-manager": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/vuepress-plugin-google-tag-manager/-/vuepress-plugin-google-tag-manager-0.0.5.tgz", + "integrity": 
"sha512-Hm1GNDdNmc4Vs9c3OMfTtHicB/oZWNCmzMFPdlOObVN1OjizIjImdm+LZIwiVKVndT2TQ4BPhMx7HQkovmD2Lg==" + }, "vuepress-plugin-sitemap": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/vuepress-plugin-sitemap/-/vuepress-plugin-sitemap-2.3.1.tgz", @@ -10297,25 +10401,24 @@ } }, "vuepress-theme-cosmos": { - "version": "1.0.173", - "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.173.tgz", - "integrity": "sha512-f2VhR7iE5bvoyqO6Y6QAHsIfQaI1t2UJx2ZH+M0v0VWHQEa34A8GqynYsjtMevkbHlE6ffuMT6haeHnp5cSeGA==", + "version": "1.0.179", + "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.179.tgz", + "integrity": "sha512-BJw/SGn7AmvaSU5Rw74BIRuAkmVZMuf1Gd1++7vpPudgNHEmWSKOHULpIdtkzn1WPXF8sgf4hZQ7+PhESk7JUQ==", "requires": { "@cosmos-ui/vue": "^0.35.0", - "@vuepress/plugin-google-analytics": "1.5.4", + "@vuepress/plugin-google-analytics": "1.7.1", "algoliasearch": "^4.2.0", - "axios": "^0.20.0", + "axios": "^0.21.0", "cheerio": "^1.0.0-rc.3", "clipboard-copy": "^3.1.0", - "entities": "2.0.2", + "entities": "2.1.0", "esm": "^3.2.25", - "fuse.js": "6.4.1", "gray-matter": "^4.0.2", "hotkeys-js": "3.8.1", "jsonp": "^0.2.1", - "markdown-it": "^11.0.1", + "markdown-it": "^12.0.0", "markdown-it-attrs": "^3.0.3", - "prismjs": "^1.21.0", + "prismjs": "^1.22.0", "pug": "^2.0.4", "pug-plain-loader": "^1.0.0", "stylus": "^0.54.8", @@ -10323,13 +10426,14 @@ "tiny-cookie": "^2.3.2", "v-runtime-template": "^1.10.0", "vuepress": "^1.5.4", + "vuepress-plugin-google-tag-manager": "0.0.5", "vuepress-plugin-sitemap": "^2.3.1" } }, "watchpack": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.0.0.tgz", - "integrity": "sha512-xSdCxxYZWNk3VK13bZRYhsQpfa8Vg63zXG+3pyU8ouqSLRCv4IGXIp9Kr226q6GBkGRlZrST2wwKtjfKz2m7Cg==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.1.0.tgz", + "integrity": 
"sha512-UjgD1mqjkG99+3lgG36at4wPnUXNvis2v1utwTgQ43C22c4LD71LsYMExdWXh4HZ+RmW+B0t1Vrg2GpXAkTOQw==", "dev": true, "requires": { "glob-to-regexp": "^0.4.1", @@ -10345,9 +10449,9 @@ } }, "watchpack-chokidar2": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.0.tgz", - "integrity": "sha512-9TyfOyN/zLUbA288wZ8IsMZ+6cbzvsNyEzSBp6e/zkifi6xxbl8SmQ/CxQq32k8NNqrdVEVUVSEf56L4rQ/ZxA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz", + "integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==", "optional": true, "requires": { "chokidar": "^2.1.8" @@ -10397,9 +10501,9 @@ }, "dependencies": { "acorn": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.1.tgz", - "integrity": "sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA==" + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", + "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==" }, "anymatch": { "version": "3.1.1", @@ -10427,9 +10531,9 @@ } }, "chokidar": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.2.tgz", - "integrity": "sha512-IZHaDeBeI+sZJRX7lGcXsdzgvZqKv6sECqsbErJA4mHWfpRrD8B97kSFN4cQz6nGBGiuFia1MKR4d6c1o8Cv7A==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.3.tgz", + "integrity": "sha512-DtM3g7juCXQxFVSNPNByEC2+NImtBuxQQvWlHunpJIS5Ocr0lG306cC7FCi7cEA0fzmybPUIl4txBIobk1gGOQ==", "optional": true, "requires": { "anymatch": "~3.1.1", @@ -10439,7 +10543,7 @@ "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", - "readdirp": "~3.4.0" + "readdirp": "~3.5.0" } }, "fill-range": { @@ -10490,9 +10594,9 @@ } }, "readdirp": { - "version": "3.4.0", - "resolved": 
"https://registry.npmjs.org/readdirp/-/readdirp-3.4.0.tgz", - "integrity": "sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ==", + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", + "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", "optional": true, "requires": { "picomatch": "^2.2.1" @@ -10518,14 +10622,14 @@ } }, "watchpack": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.4.tgz", - "integrity": "sha512-aWAgTW4MoSJzZPAicljkO1hsi1oKj/RRq/OJQh2PKI2UKL04c2Bs+MBOB+BBABHTXJpf9mCwHN7ANCvYsvY2sg==", + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz", + "integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==", "requires": { "chokidar": "^3.4.1", "graceful-fs": "^4.1.2", "neo-async": "^2.5.0", - "watchpack-chokidar2": "^2.0.0" + "watchpack-chokidar2": "^2.0.1" } } } @@ -10634,23 +10738,53 @@ } }, "debug": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", - "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", "requires": { "ms": "2.1.2" } }, + "find-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "requires": { + "locate-path": "^3.0.0" + } + }, "is-absolute-url": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", "integrity": 
"sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==" }, + "locate-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", + "requires": { + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" + } + }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, + "p-locate": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", + "requires": { + "p-limit": "^2.0.0" + } + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + }, "schema-utils": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", @@ -10905,9 +11039,9 @@ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==" }, "y18n": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", - "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==" + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.1.tgz", + "integrity": "sha512-wNcy4NvjMYL8gogWWYAO7ZFWFfHcbdbE57tZO8e4cbpj8tfUcwrwqSl3ad8HxpYWCdXcJUCeKKZS62Av1affwQ==" }, "yallist": { "version": "3.1.1", diff --git a/docs/package.json b/docs/package.json index e518a244ec..65cb2e3eec 100644 --- a/docs/package.json +++ b/docs/package.json @@ -4,10 +4,10 @@ "description": "Tendermint Core Documentation", "main": "index.js", 
"dependencies": { - "vuepress-theme-cosmos": "^1.0.173" + "vuepress-theme-cosmos": "^1.0.179" }, "devDependencies": { - "watchpack": "^2.0.0" + "watchpack": "^2.1.0" }, "scripts": { "preserve": "./pre.sh", diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index fa94f3a1ec..a6c1331b8f 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -7,13 +7,10 @@ parent: # Overview -This section dives into the internals of Tendermint the implementation. +This section dives into the internals of Go-Tendermint. - [Using Tendermint](./using-tendermint.md) -- [Configuration](./configuration.md) - [Running in Production](./running-in-production.md) -- [Metrics](./metrics.md) -- [Validators](./validators.md) - [Subscribing to events](./subscription.md) - [Block Structure](./block-structure.md) - [RPC](./rpc.md) diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index 4c78d1b39b..ea3d4b474a 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -1,492 +1,7 @@ --- -order: 3 +order: false --- # Configuration -Tendermint Core can be configured via a TOML file in -`$TMHOME/config/config.toml`. Some of these parameters can be overridden by -command-line flags. For most users, the options in the `##### main base configuration options #####` are intended to be modified while config options -further below are intended for advance power users. - -## Options - -The default configuration file create by `tendermint init` has all -the parameters set with their default values. It will look something -like the file below, however, double check by inspecting the -`config.toml` created with your version of `tendermint` installed: - -```toml -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. 
"data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "anonymous" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,statesync:info,*:error" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the 
private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://127.0.0.1:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. 
See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configurattion Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -max_batch_bytes = 10485760 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. 
-# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "3s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). 
-timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. 
-# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" - -``` - -## Empty blocks VS no empty blocks - -### create_empty_blocks = true - -If `create_empty_blocks` is set to `true` in your config, blocks will be -created ~ every second (with default consensus parameters). You can regulate -the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks. - -### create_empty_blocks = false - -In this setting, blocks are created when transactions received. - -Note after the block H, Tendermint creates something we call a "proof block" -(only if the application hash changed) H+1. The reason for this is to support -proofs. If you have a transaction in block H that changes the state to X, the -new application hash will only be included in block H+1. If after your -transaction is committed, you want to get a light-client proof for the new state -(X), you need the new block to be committed in order to do that because the new -block has the new application hash for the state X. That's why we make a new -(empty) block if the application hash changes. Otherwise, you won't be able to -make a proof for the new state. - -Plus, if you set `create_empty_blocks_interval` to something other than the -default (`0`), Tendermint will be creating empty blocks even in the absence of -transactions every `create_empty_blocks_interval`. For instance, with -`create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`, -Tendermint will only create blocks if there are transactions, or after waiting -30 seconds without receiving any transactions. 
- -## Consensus timeouts explained - -There's a variety of information about timeouts in [Running in -production](./running-in-production.md) - -You can also find more detailed technical explanation in the spec: [The latest -gossip on BFT consensus](https://arxiv.org/abs/1807.04938). - -```toml -[consensus] -... - -timeout_propose = "3s" -timeout_propose_delta = "500ms" -timeout_prevote = "1s" -timeout_prevote_delta = "500ms" -timeout_precommit = "1s" -timeout_precommit_delta = "500ms" -timeout_commit = "1s" -``` - -Note that in a successful round, the only timeout that we absolutely wait no -matter what is `timeout_commit`. - -Here's a brief summary of the timeouts: - -- `timeout_propose` = how long we wait for a proposal block before prevoting - nil -- `timeout_propose_delta` = how much timeout_propose increases with each round -- `timeout_prevote` = how long we wait after receiving +2/3 prevotes for - anything (ie. not a single block or nil) -- `timeout_prevote_delta` = how much the timeout_prevote increases with each - round -- `timeout_precommit` = how long we wait after receiving +2/3 precommits for - anything (ie. not a single block or nil) -- `timeout_precommit_delta` = how much the timeout_precommit increases with - each round -- `timeout_commit` = how long we wait after committing a block, before starting - on the new height (this gives us a chance to receive some more precommits, - even though we already have +2/3) - -## P2P settings - -This section will cover settings within the p2p section of the `config.toml`. - -- `external_address` = is the address that will be advertised for other nodes to use. We recommend setting this field with your public IP and p2p port. -- `seeds` = is a list of comma separated seed nodes that you will connect upon a start and ask for peers. 
A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the networks -- `persistent_peers` = is a list of comma separated peers that you will always want to be connected to. If you're already connected to the maximum number of peers, persistent peers will not be added. -- `max_num_inbound_peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). -- `max_num_outbound_peers` = is the maximum number of peers you will initiate outbound connects to at one time (where you dial their address and initiate the connection). -- `unconditional_peer_ids` = is similar to `persistent_peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node. -- `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on. -- `seed_mode` = is used for when node operators want to run their node as a seed node. Seed node's run a variation of the PeX protocol that disconnects from peers after sending them a list of peers to connect to. To minimize the servers usage, it is recommended to set the mempool's size to 0. -- `private_peer_ids` = is a comma separated list of node ids that you would not like exposed to other peers (ie. you will not tell other peers about the private_peer_ids). This can be filled with a validators node id. +This file has moved to the [node_operators section](../node_operators/configuration.md). 
diff --git a/docs/tendermint-core/fast-sync.md b/docs/tendermint-core/fast-sync.md index 9bbeade381..a36a158c8f 100644 --- a/docs/tendermint-core/fast-sync.md +++ b/docs/tendermint-core/fast-sync.md @@ -28,7 +28,7 @@ has at least one peer and it's height is at least as high as the max reported peer height. See [the IsCaughtUp method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128). -Note: There are three versions of fast sync. We recommend using v0 as v1 and v2 are still in beta. +Note: There are three versions of fast sync. We recommend using v0 as v2 is still in beta. If you would like to use a different version you can do so by changing the version in the `config.toml`: ```toml @@ -39,8 +39,7 @@ Note: There are three versions of fast sync. We recommend using v0 as v1 and v2 # Fast Sync version to use: # 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability +# 2) "v2" - complete redesign of v0, optimized for testability & readability version = "v0" ``` diff --git a/docs/tendermint-core/light-client.md b/docs/tendermint-core/light-client.md index 1b07a51e9b..eb20bd684b 100644 --- a/docs/tendermint-core/light-client.md +++ b/docs/tendermint-core/light-client.md @@ -51,38 +51,4 @@ and return an error. In summary, the light client is not safe when a) more than the trust level of validators are malicious and b) all witnesses are malicious. 
-## Where to obtain trusted height & hash - -One way to obtain a semi-trusted hash & height is to query multiple full nodes -and compare their hashes: - -```bash -$ curl -s https://233.123.0.140:26657:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" -{ - "height": "273", - "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" -} -``` - -## Running a light client as an HTTP proxy server - -Tendermint comes with a built-in `tendermint light` command, which can be used -to run a light client proxy server, verifying Tendermint RPC. All calls that -can be tracked back to a block header by a proof will be verified before -passing them back to the caller. Other than that, it will present the same -interface as a full Tendermint node. - -You can start the light client proxy server by running `tendermint light `, -with a variety of flags to specify the primary node, the witness nodes (which cross-check -the information provided by the primary), the hash and height of the trusted header, -and more. - -For example: - -```bash -$ tendermint light supernova -p tcp://233.123.0.140:26657 \ - -w tcp://179.63.29.15:26657,tcp://144.165.223.135:26657 \ - --height=10 --hash=37E9A6DD3FA25E83B22C18835401E8E56088D0D7ABC6FD99FCDC920DD76C1C57 -``` - -For additional options, run `tendermint light --help`. +Information on how to run a light client is located in the [nodes section](../nodes/light-client.md). diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md index 67eb13e0d5..ea3dab6849 100644 --- a/docs/tendermint-core/metrics.md +++ b/docs/tendermint-core/metrics.md @@ -1,60 +1,7 @@ --- -order: 5 +order: false --- # Metrics -Tendermint can report and serve the Prometheus metrics, which in their turn can -be consumed by Prometheus collector(s). - -This functionality is disabled by default. 
- -To enable the Prometheus metrics, set `instrumentation.prometheus=true` if your -config file. Metrics will be served under `/metrics` on 26660 port by default. -Listen address can be changed in the config file (see -`instrumentation.prometheus\_listen\_addr`). - -## List of available metrics - -The following metrics are available: - -| **Name** | **Type** | **Tags** | **Description** | -| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- | -| consensus_height | Gauge | | Height of the chain | -| consensus_validators | Gauge | | Number of validators | -| consensus_validators_power | Gauge | | Total voting power of all validators | -| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | -| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | -| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | -| consensus_missing_validators | Gauge | | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | | Number of rounds | -| consensus_num_txs | Gauge | | Number of transactions | -| consensus_total_txs | Gauge | | Total number of transactions committed | -| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer | -| consensus_latest_block_height | gauge | | /status sync_info number | -| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) | -| 
consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | -| consensus_block_size_bytes | Gauge | | Block size in bytes | -| p2p_peers | Gauge | | Number of peers node's connected to | -| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer | -| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer | -| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer | -| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id | -| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer | -| mempool_size | Gauge | | Number of uncommitted transactions | -| mempool_tx_size_bytes | histogram | | transaction sizes in bytes | -| mempool_failed_txs | counter | | number of failed transactions | -| mempool_recheck_times | counter | | number of transactions rechecked in the mempool | -| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms | - -## Useful queries - -Percentage of missing + byzantine validators: - -```md -((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100 -``` +This file has moved to the [node_operators section](../node_operators/metrics.md). diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index 41f40641e1..833ad77e56 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -4,6 +4,8 @@ order: 4 # Running in production +If you are building Tendermint from source for use in production, make sure to check out an appropriate Git tag instead of a branch. + ## Database By default, Tendermint uses the `syndtr/goleveldb` package for its in-process @@ -220,8 +222,8 @@ Recovering from data corruption can be hard and time-consuming. 
Here are two app ./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal ``` -3) Search for a "CORRUPTED MESSAGE" line. -4) By looking at the previous message and the message after the corrupted one +3) Search for a "CORRUPTED MESSAGE" line. +4) By looking at the previous message and the message after the corrupted one and looking at the logs, try to rebuild the message. If the consequent messages are marked as corrupted too (this may happen if length header got corrupted or some writes did not make it to the WAL ~ truncation), @@ -232,7 +234,7 @@ Recovering from data corruption can be hard and time-consuming. Here are two app $EDITOR /tmp/corrupted_wal ``` -5) After editing, convert this file back into binary form by running: +5) After editing, convert this file back into binary form by running: ```sh ./scripts/json2wal/json2wal /tmp/corrupted_wal $TMHOME/data/cs.wal/wal diff --git a/docs/tendermint-core/state-sync.md b/docs/tendermint-core/state-sync.md index 77c10c4de9..38bf8bf33d 100644 --- a/docs/tendermint-core/state-sync.md +++ b/docs/tendermint-core/state-sync.md @@ -8,41 +8,4 @@ With fast sync a node is downloading all of the data of an application from gene With state sync your node will download data related to the head or near the head of the chain and verify the data. This leads to drastically shorter times for joining a network. -## Using State Sync - -State sync will continuously work in the background to supply nodes with chunked data when bootstrapping. - -> NOTE: Before trying to use state sync, see if the application you are operating a node for supports it. - -Under the state sync section in `config.toml` you will find multiple settings that need to be configured in order for your node to use state sync. - -Lets breakdown the settings: - -- `enable`: Enable is to inform the node that you will be using state sync to bootstrap your node. 
-- `rpc_servers`: RPC servers are needed because state sync utilizes the light client for verification. - - 2 servers are required, more is always helpful. -- `temp_dir`: Temporary directory is store the chunks in the machines local storage, If nothing is set it will create a directory in `/tmp` - -The next information you will need to acquire it through publicly exposed RPC's or a block explorer which you trust. - -- `trust_height`: Trusted height defines at which height your node should trust the chain. -- `trust_hash`: Trusted hash is the hash in the `BlockID` corresponding to the trusted height. -- `trust_period`: Trust period is the period in which headers can be verified. - > :warning: This value should be significantly smaller than the unbonding period. - -If you are relying on publicly exposed RPC's to get the need information, you can use `curl`. - -Example: - -```bash -curl -s https://233.123.0.140:26657:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" -``` - -The response will be: - -```json -{ - "height": "273", - "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" -} -``` +Information on how to configure state sync is located in the [nodes section](../nodes/state-sync.md) diff --git a/docs/tendermint-core/validators.md b/docs/tendermint-core/validators.md index 084fe27fa6..2e2c434347 100644 --- a/docs/tendermint-core/validators.md +++ b/docs/tendermint-core/validators.md @@ -1,114 +1,7 @@ --- -order: 6 +order: false --- # Validators -Validators are responsible for committing new blocks in the blockchain. -These validators participate in the consensus protocol by broadcasting -_votes_ which contain cryptographic signatures signed by each -validator's private key. 
- -Some Proof-of-Stake consensus algorithms aim to create a "completely" -decentralized system where all stakeholders (even those who are not -always available online) participate in the committing of blocks. -Tendermint has a different approach to block creation. Validators are -expected to be online, and the set of validators is permissioned/curated -by some external process. Proof-of-stake is not required, but can be -implemented on top of Tendermint consensus. That is, validators may be -required to post collateral on-chain, off-chain, or may not be required -to post any collateral at all. - -Validators have a cryptographic key-pair and an associated amount of -"voting power". Voting power need not be the same. - -## Becoming a Validator - -There are two ways to become validator. - -1. They can be pre-established in the [genesis state](./using-tendermint.md#genesis) -2. The ABCI app responds to the EndBlock message with changes to the - existing validator set. - -## Setting up a Validator - -When setting up a validator there are countless ways to configure your setup. This guide is aimed at showing one of them, the sentry node design. This design is mainly for DDOS prevention. - -### Network Layout - -![ALT Network Layout](./sentry_layout.png) - -The diagram is based on AWS, other cloud providers will have similar solutions to design a solution. Running nodes is not limited to cloud providers, you can run nodes on bare metal systems as well. The architecture will be the same no matter which setup you decide to go with. - -The proposed network diagram is similar to the classical backend/frontend separation of services in a corporate environment. The “backend” in this case is the private network of the validator in the data center. The data center network might involve multiple subnets, firewalls and redundancy devices, which is not detailed on this diagram. The important point is that the data center allows direct connectivity to the chosen cloud environment. 
Amazon AWS has “Direct Connect”, while Google Cloud has “Partner Interconnect”. This is a dedicated connection to the cloud provider (usually directly to your virtual private cloud instance in one of the regions). - -All sentry nodes (the “frontend”) connect to the validator using this private connection. The validator does not have a public IP address to provide its services. - -Amazon has multiple availability zones within a region. One can install sentry nodes in other regions too. In this case the second, third and further regions need to have a private connection to the validator node. This can be achieved by VPC Peering (“VPC Network Peering” in Google Cloud). In this case, the second, third and further region sentry nodes will be directed to the first region and through the direct connect to the data center, arriving to the validator. - -A more persistent solution (not detailed on the diagram) is to have multiple direct connections to different regions from the data center. This way VPC Peering is not mandatory, although still beneficial for the sentry nodes. This overcomes the risk of depending on one region. It is more costly. - -### Local Configuration - -![ALT Local Configuration](./local_config.png) - -The validator will only talk to the sentry that are provided, the sentry nodes will communicate to the validator via a secret connection and the rest of the network through a normal connection. The sentry nodes do have the option of communicating with each other as well. - -When initializing nodes there are five parameters in the `config.toml` that may need to be altered. - -- `pex:` boolean. This turns the peer exchange reactor on or off for a node. When `pex=false`, only the `persistent_peers` list is available for connection. -- `persistent_peers:` a comma separated list of `nodeID@ip:port` values that define a list of peers that are expected to be online at all times. 
This is necessary at first startup because by setting `pex=false` the node will not be able to join the network. -- `unconditional_peer_ids:` comma separated list of nodeID's. These nodes will be connected to no matter the limits of inbound and outbound peers. This is useful for when sentry nodes have full address books. -- `private_peer_ids:` comma separated list of nodeID's. These nodes will not be gossiped to the network. This is an important field as you do not want your validator IP gossiped to the network. -- `addr_book_strict:` boolean. By default nodes with a routable address will be considered for connection. If this setting is turned off (false), non-routable IP addresses, like addresses in a private network can be added to the address book. -- `double_sign_check_height` int64 height. How many blocks to look back to check existence of the node's consensus votes before joining consensus When non-zero, the node will panic upon restart if the same consensus key was used to sign {double_sign_check_height} last blocks. So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. - -#### Validator Node Configuration - -| Config Option | Setting | -| ------------------------ | -------------------------- | -| pex | false | -| persistent_peers | list of sentry nodes | -| private_peer_ids | none | -| unconditional_peer_ids | optionally sentry node IDs | -| addr_book_strict | false | -| double_sign_check_height | 10 | - -The validator node should have `pex=false` so it does not gossip to the entire network. The persistent peers will be your sentry nodes. Private peers can be left empty as the validator is not trying to hide who it is communicating with. Setting unconditional peers is optional for a validator because they will not have a full address books. 
- -#### Sentry Node Configuration - -| Config Option | Setting | -| ---------------------- | --------------------------------------------- | -| pex | true | -| persistent_peers | validator node, optionally other sentry nodes | -| private_peer_ids | validator node ID | -| unconditional_peer_ids | validator node ID, optionally sentry node IDs | -| addr_book_strict | false | - -The sentry nodes should be able to talk to the entire network hence why `pex=true`. The persistent peers of a sentry node will be the validator, and optionally other sentry nodes. The sentry nodes should make sure that they do not gossip the validator's ip, to do this you must put the validators nodeID as a private peer. The unconditional peer IDs will be the validator ID and optionally other sentry nodes. - -> Note: Do not forget to secure your node's firewalls when setting them up. - -More Information can be found at these links: - -- -- - -### Validator keys - -Protecting a validator's consensus key is the most important factor to take in when designing your setup. The key that a validator is given upon creation of the node is called a consensus key, it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io/) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms), it is used extensively in production. You are not limited to using this tool, there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), there is not a recommended HSM. - -Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are widely supported across the security sector and HSMs. 
- -## Committing a Block - -> **+2/3 is short for "more than 2/3"** - -A block is committed when +2/3 of the validator set sign [precommit -votes](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#vote) for that block at the same `round`. -The +2/3 set of precommit votes is called a -[_commit_](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#commit). While any +2/3 set of -precommits for the same block at the same height&round can serve as -validation, the canonical commit is included in the next block (see -[LastCommit](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#lastcommit)). +This file has moved to the [node_operators section](../node_operators/validators.md). diff --git a/docs/tools/README.md b/docs/tools/README.md index 720022f96e..b1e3d2319e 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -1,7 +1,7 @@ --- order: 1 parent: - title: Tools + title: Tooling order: 6 --- diff --git a/docs/tools/debugging.md b/docs/tools/debuging/README.md similarity index 95% rename from docs/tools/debugging.md rename to docs/tools/debuging/README.md index 17fa0ec117..2932f6e869 100644 --- a/docs/tools/debugging.md +++ b/docs/tools/debuging/README.md @@ -1,6 +1,13 @@ +--- +order: 1 +parent: + title: Debugging + order: 1 +--- + # Debugging -## tendermint debug kill +## Tendermint debug kill Tendermint comes with a `debug` sub-command that allows you to kill a live Tendermint process while collecting useful information in a compressed archive. diff --git a/docs/tools/debuging/pro.md b/docs/tools/debuging/pro.md new file mode 100644 index 0000000000..9a48034dde --- /dev/null +++ b/docs/tools/debuging/pro.md @@ -0,0 +1,77 @@ +--- +order: 2 +--- + +# Debug Like A Pro + +## Intro + +Tendermint Core is a fairly robust BFT replication engine. 
Unfortunately, as with other software, failures sometimes do happen. The question is then “what do you do” when the system deviates from the expected behavior. + +The first response is usually to take a look at the logs. By default, Tendermint writes logs to standard output ¹. + +```sh +I[2020-05-29|03:03:16.145] Committed state module=state height=2282 txs=0 appHash=0A27BC6B0477A8A50431704D2FB90DB99CBFCB67A2924B5FBF6D4E78538B67C1I[2020-05-29|03:03:21.690] Executed block module=state height=2283 validTxs=0 invalidTxs=0I[2020-05-29|03:03:21.698] Committed state module=state height=2283 txs=0 appHash=EB4E409D3AF4095A0757C806BF160B3DE4047AC0416F584BFF78FC0D44C44BF3I[2020-05-29|03:03:27.994] Executed block module=state height=2284 validTxs=0 invalidTxs=0I[2020-05-29|03:03:28.003] Committed state module=state height=2284 txs=0 appHash=3FC9237718243A2CAEE3A8B03AE05E1FC3CA28AEFE8DF0D3D3DCE00D87462866E[2020-05-29|03:03:32.975] enterPrevote: ProposalBlock is invalid module=consensus height=2285 round=0 err="wrong signature (#35): C683341000384EA00A345F9DB9608292F65EE83B51752C0A375A9FCFC2BD895E0792A0727925845DC13BA0E208C38B7B12B2218B2FE29B6D9135C53D7F253D05" +``` + +If you’re running a validator in production, it might be a good idea to forward the logs for analysis using filebeat or similar tools. Also, you can set up a notification in case of any errors. + +The logs should give you the basic idea of what has happened. In the worst-case scenario, the node has stalled and does not produce any logs (or simply panicked). + +The next step is to call /status, /net_info, /consensus_state and /dump_consensus_state RPC endpoints. + +```sh +curl http://:26657/status$ curl http://:26657/net_info$ curl http://:26657/consensus_state$ curl http://:26657/dump_consensus_state +``` + +Please note that /consensus_state and /dump_consensus_state may not return a result if the node has stalled (since they try to get a hold of the consensus mutex). 
+
+The output of these endpoints contains all the information needed for developers to understand the state of the node. It will give you an idea if the node is lagging behind the network, how many peers it’s connected to, and what the latest consensus state is.
+
+At this point, if the node is stalled and you want to restart it, the best thing you can do is to kill it with -6 signal:
+
+```sh
+kill -6 <PID>
+```
+
+which will dump the list of the currently running goroutines. The list is super useful when debugging a deadlock.
+
+`PID` is the Tendermint’s process ID. You can find it out by running `ps -a | grep tendermint | awk '{print $1}'`
+
+## Tendermint debug kill
+
+To ease the burden of collecting different pieces of data Tendermint Core (since v0.33 version) provides the Tendermint debug kill tool, which will do all of the above steps for you, wrapping everything into a nice archive file.
+
+```sh
+tendermint debug kill <pid> </path/to/out.zip> --home=<path-to-tendermint-home>
+```
+
+Here’s the official documentation page: [Debugging](./README.md)
+
+If you’re using a process supervisor, like systemd, it will restart Tendermint automatically. We strongly advise you to have one in production. If not, you will need to restart the node by hand.
+
+Another advantage of using Tendermint debug is that the same archive file can be given to Tendermint Core developers, in cases where you think there’s a software issue.
+
+## Tendermint debug dump
+
+Okay, but what if the node has not stalled, but its state is degrading over time? Tendermint debug dump to the rescue!
+
+```sh
+tendermint debug dump </path/to/output-dir> --home=<path-to-tendermint-home>
+```
+
+It won’t kill the node, but it will gather all of the above data and package it into an archive file. Plus, it will also make a heap dump, which should help if Tendermint is leaking memory.
+
+At this point, depending on how severe the degradation is, you may want to restart the process.
+
+## Outro
+
+We’re hoping that the `tendermint debug` subcommand will become the de facto first response to any accidents.
+ +Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` yet? + +Join our chat, where we discuss the current issues and future improvements. + +— + +[1]: Of course, you’re free to redirect the Tendermint’s output to a file or forward it to another server. diff --git a/docs/versions b/docs/versions index f3f5734d1c..cabe8a0019 100644 --- a/docs/versions +++ b/docs/versions @@ -1,3 +1,4 @@ -v0.32 v0.32 +v0.32.x v0.32 cyrus/0.33-version v0.33 +v0.34.x v0.34 master master diff --git a/evidence/mocks/block_store.go b/evidence/mocks/block_store.go index 26eaf411c5..ee4505d06b 100644 --- a/evidence/mocks/block_store.go +++ b/evidence/mocks/block_store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. package mocks diff --git a/evidence/pool.go b/evidence/pool.go index 780694bf4b..c03708e78b 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "reflect" + "sort" "sync" "sync/atomic" "time" @@ -13,10 +13,8 @@ import ( gogotypes "github.com/gogo/protobuf/types" dbm "github.com/tendermint/tm-db" - abci "github.com/lazyledger/lazyledger-core/abci/types" clist "github.com/lazyledger/lazyledger-core/libs/clist" "github.com/lazyledger/lazyledger-core/libs/log" - evproto "github.com/lazyledger/lazyledger-core/proto/tendermint/evidence" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" sm "github.com/lazyledger/lazyledger-core/state" "github.com/lazyledger/lazyledger-core/types" @@ -83,7 +81,7 @@ func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool, // PendingEvidence is used primarily as part of block proposal and returns up to maxNum of uncommitted evidence. 
func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { - if atomic.LoadUint32(&evpool.evidenceSize) == 0 { + if evpool.Size() == 0 { return []types.Evidence{}, 0 } evidence, size, err := evpool.listEvidence(baseKeyPending, maxBytes) @@ -94,7 +92,7 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { } // Update pulls the latest state to be used for expiration and evidence params and then prunes all expired evidence -func (evpool *Pool) Update(state sm.State) { +func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { // sanity check if state.LastBlockHeight <= evpool.state.LastBlockHeight { panic(fmt.Sprintf( @@ -109,8 +107,10 @@ func (evpool *Pool) Update(state sm.State) { // update the state evpool.updateState(state) + evpool.markEvidenceAsCommitted(ev) + // prune pending evidence when it has expired. This also updates when the next evidence will expire - if atomic.LoadUint32(&evpool.evidenceSize) > 0 && state.LastBlockHeight > evpool.pruningHeight && + if evpool.Size() > 0 && state.LastBlockHeight > evpool.pruningHeight && state.LastBlockTime.After(evpool.pruningTime) { evpool.pruningHeight, evpool.pruningTime = evpool.removeExpiredPendingEvidence() } @@ -122,17 +122,26 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error { // We have already verified this piece of evidence - no need to do it again if evpool.isPending(ev) { - return errors.New("evidence already verified and added") + evpool.logger.Info("Evidence already pending, ignoring this one", "ev", ev) + return nil + } + + // check that the evidence isn't already committed + if evpool.isCommitted(ev) { + // this can happen if the peer that sent us the evidence is behind so we shouldn't + // punish the peer. + evpool.logger.Debug("Evidence was already committed, ignoring this one", "ev", ev) + return nil } // 1) Verify against state. 
- evInfo, err := evpool.verify(ev) + err := evpool.verify(ev) if err != nil { return types.NewErrInvalidEvidence(ev, err) } // 2) Save to store. - if err := evpool.addPendingEvidence(evInfo); err != nil { + if err := evpool.addPendingEvidence(ev); err != nil { return fmt.Errorf("can't add evidence to pending list: %w", err) } @@ -144,38 +153,20 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error { return nil } -// AddEvidenceFromConsensus should be exposed only to the consensus so it can add evidence to the pool -// directly without the need for verification. -func (evpool *Pool) AddEvidenceFromConsensus(ev types.Evidence, time time.Time, valSet *types.ValidatorSet) error { - var ( - vals []*types.Validator - totalPower int64 - ) +// AddEvidenceFromConsensus should be exposed only to the consensus reactor so it can add evidence +// to the pool directly without the need for verification. +func (evpool *Pool) AddEvidenceFromConsensus(ev types.Evidence) error { + // we already have this evidence, log this but don't return an error. 
if evpool.isPending(ev) { - return errors.New("evidence already verified and added") // we already have this evidence - } - - switch ev := ev.(type) { - case *types.DuplicateVoteEvidence: - _, val := valSet.GetByAddress(ev.VoteA.ValidatorAddress) - vals = append(vals, val) - totalPower = valSet.TotalVotingPower() - default: - return fmt.Errorf("unrecognized evidence type: %T", ev) - } - - evInfo := &info{ - Evidence: ev, - Time: time, - Validators: vals, - TotalVotingPower: totalPower, + evpool.logger.Info("Evidence already pending, ignoring this one", "ev", ev) + return nil } - if err := evpool.addPendingEvidence(evInfo); err != nil { + if err := evpool.addPendingEvidence(ev); err != nil { return fmt.Errorf("can't add evidence to pending list: %w", err) } - + // add evidence to be gossiped with peers evpool.evidenceList.PushBack(ev) evpool.logger.Info("Verified new evidence of byzantine behavior", "evidence", ev) @@ -194,13 +185,20 @@ func (evpool *Pool) CheckEvidence(evList types.EvidenceList) error { ok := evpool.fastCheck(ev) if !ok { - evInfo, err := evpool.verify(ev) + // check that the evidence isn't already committed + if evpool.isCommitted(ev) { + return &types.ErrInvalidEvidence{Evidence: ev, Reason: errors.New("evidence was already committed")} + } + + err := evpool.verify(ev) if err != nil { return &types.ErrInvalidEvidence{Evidence: ev, Reason: err} } - if err := evpool.addPendingEvidence(evInfo); err != nil { - evpool.logger.Error("Can't add evidence to pending list", "err", err, "evInfo", evInfo) + if err := evpool.addPendingEvidence(ev); err != nil { + // Something went wrong with adding the evidence but we already know it is valid + // hence we log an error and continue + evpool.logger.Error("Can't add evidence to pending list", "err", err, "ev", ev) } evpool.logger.Info("Verified new evidence of byzantine behavior", "evidence", ev) @@ -218,85 +216,6 @@ func (evpool *Pool) CheckEvidence(evList types.EvidenceList) error { return nil } -// 
ABCIEvidence processes all the evidence in the block, marking it as committed and removing it -// from the pending database. It then forms the individual abci evidence that will be passed back to -// the application. -func (evpool *Pool) ABCIEvidence(height int64, evidence []types.Evidence) []abci.Evidence { - // make a map of committed evidence to remove from the clist - blockEvidenceMap := make(map[string]struct{}, len(evidence)) - abciEvidence := make([]abci.Evidence, 0) - for _, ev := range evidence { - - // get entire evidence info from pending list - infoBytes, err := evpool.evidenceStore.Get(keyPending(ev)) - if err != nil { - evpool.logger.Error("Unable to retrieve evidence to pass to ABCI. "+ - "Evidence pool should have seen this evidence before", - "evidence", ev, "err", err) - continue - } - var infoProto evproto.Info - err = infoProto.Unmarshal(infoBytes) - if err != nil { - evpool.logger.Error("Decoding evidence info failed", "err", err, "height", ev.Height(), "hash", ev.Hash()) - continue - } - evInfo, err := infoFromProto(&infoProto) - if err != nil { - evpool.logger.Error("Converting evidence info from proto failed", "err", err, "height", ev.Height(), - "hash", ev.Hash()) - continue - } - - var evType abci.EvidenceType - switch ev.(type) { - case *types.DuplicateVoteEvidence: - evType = abci.EvidenceType_DUPLICATE_VOTE - case *types.LightClientAttackEvidence: - evType = abci.EvidenceType_LIGHT_CLIENT_ATTACK - default: - evpool.logger.Error("Unknown evidence type", "T", reflect.TypeOf(ev)) - continue - } - for _, val := range evInfo.Validators { - abciEv := abci.Evidence{ - Type: evType, - Validator: types.TM2PB.Validator(val), - Height: ev.Height(), - Time: evInfo.Time, - TotalVotingPower: evInfo.TotalVotingPower, - } - abciEvidence = append(abciEvidence, abciEv) - evpool.logger.Info("Created ABCI evidence", "ev", abciEv) - } - - // we can now remove the evidence from the pending list and the clist that we use for gossiping - 
evpool.removePendingEvidence(ev) - blockEvidenceMap[evMapKey(ev)] = struct{}{} - - // Add evidence to the committed list - // As the evidence is stored in the block store we only need to record the height that it was saved at. - key := keyCommitted(ev) - - h := gogotypes.Int64Value{Value: height} - evBytes, err := proto.Marshal(&h) - if err != nil { - panic(err) - } - - if err := evpool.evidenceStore.Set(key, evBytes); err != nil { - evpool.logger.Error("Unable to add committed evidence", "err", err) - } - } - - // remove committed evidence from the clist - if len(blockEvidenceMap) != 0 { - evpool.removeEvidenceFromList(blockEvidenceMap) - } - - return abciEvidence -} - // EvidenceFront goes to the first evidence in the clist func (evpool *Pool) EvidenceFront() *clist.CElement { return evpool.evidenceList.Front() @@ -312,6 +231,11 @@ func (evpool *Pool) SetLogger(l log.Logger) { evpool.logger = l } +// Size returns the number of evidence in the pool. +func (evpool *Pool) Size() uint32 { + return atomic.LoadUint32(&evpool.evidenceSize) +} + // State returns the current state of the evpool. func (evpool *Pool) State() sm.State { evpool.mtx.Lock() @@ -321,106 +245,59 @@ func (evpool *Pool) State() sm.State { //-------------------------------------------------------------------------- -// Info is a wrapper around the evidence that the evidence pool receives with extensive -// information of what validators were malicious, the time of the attack and the total voting power -// This is saved as a form of cache so that the evidence pool can easily produce the ABCI Evidence -// needed to be sent to the application. 
-type info struct { - Evidence types.Evidence - Time time.Time - Validators []*types.Validator - TotalVotingPower int64 - ByteSize int64 -} - -// ToProto encodes into protobuf -func (ei info) ToProto() (*evproto.Info, error) { - evpb, err := types.EvidenceToProto(ei.Evidence) - if err != nil { - return nil, err - } - - valsProto := make([]*tmproto.Validator, len(ei.Validators)) - for i := 0; i < len(ei.Validators); i++ { - valp, err := ei.Validators[i].ToProto() - if err != nil { - return nil, err - } - valsProto[i] = valp - } - - return &evproto.Info{ - Evidence: *evpb, - Time: ei.Time, - Validators: valsProto, - TotalVotingPower: ei.TotalVotingPower, - }, nil -} - -// InfoFromProto decodes from protobuf into Info -func infoFromProto(proto *evproto.Info) (info, error) { - if proto == nil { - return info{}, errors.New("nil evidence info") - } - - ev, err := types.EvidenceFromProto(&proto.Evidence) - if err != nil { - return info{}, err - } - - vals := make([]*types.Validator, len(proto.Validators)) - for i := 0; i < len(proto.Validators); i++ { - val, err := types.ValidatorFromProto(proto.Validators[i]) - if err != nil { - return info{}, err - } - vals[i] = val - } - - return info{ - Evidence: ev, - Time: proto.Time, - Validators: vals, - TotalVotingPower: proto.TotalVotingPower, - ByteSize: int64(proto.Evidence.Size()), - }, nil - -} - -//-------------------------------------------------------------------------- - // fastCheck leverages the fact that the evidence pool may have already verified the evidence to see if it can // quickly conclude that the evidence is already valid. 
func (evpool *Pool) fastCheck(ev types.Evidence) bool { - key := keyPending(ev) if lcae, ok := ev.(*types.LightClientAttackEvidence); ok { + key := keyPending(ev) evBytes, err := evpool.evidenceStore.Get(key) if evBytes == nil { // the evidence is not in the nodes pending list return false } if err != nil { - evpool.logger.Error("Failed to load evidence", "err", err, "evidence", lcae) + evpool.logger.Error("Failed to load light client attack evidence", "err", err, "key(height/hash)", key) return false } - evInfo, err := bytesToInfo(evBytes) + var trustedPb tmproto.LightClientAttackEvidence + err = trustedPb.Unmarshal(evBytes) if err != nil { - evpool.logger.Error("Failed to convert evidence from proto", "err", err, "evidence", lcae) + evpool.logger.Error("Failed to convert light client attack evidence from bytes", + "err", err, "key(height/hash)", key) return false } - // ensure that all the validators that the evidence pool have found to be malicious - // are present in the list of commit signatures in the conflicting block - OUTER: - for _, sig := range lcae.ConflictingBlock.Commit.Signatures { - for _, val := range evInfo.Validators { - if bytes.Equal(val.Address, sig.ValidatorAddress) { - continue OUTER - } - } - // a validator we know is malicious is not included in the commit - evpool.logger.Info("Fast check failed: a validator we know is malicious is not " + - "in the commit sigs. 
Reverting to full verification") + trustedEv, err := types.LightClientAttackEvidenceFromProto(&trustedPb) + if err != nil { + evpool.logger.Error("Failed to convert light client attack evidence from protobuf", + "err", err, "key(height/hash)", key) + return false + } + // ensure that all the byzantine validators that the evidence pool has match the byzantine validators + // in this evidence + if trustedEv.ByzantineValidators == nil && lcae.ByzantineValidators != nil { + return false + } + + if len(trustedEv.ByzantineValidators) != len(lcae.ByzantineValidators) { return false } + + byzValsCopy := make([]*types.Validator, len(lcae.ByzantineValidators)) + for i, v := range lcae.ByzantineValidators { + byzValsCopy[i] = v.Copy() + } + + // ensure that both validator arrays are in the same order + sort.Sort(types.ValidatorsByVotingPower(byzValsCopy)) + + for idx, val := range trustedEv.ByzantineValidators { + if !bytes.Equal(byzValsCopy[idx].Address, val.Address) { + return false + } + if byzValsCopy[idx].VotingPower != val.VotingPower { + return false + } + } + return true } @@ -460,8 +337,8 @@ func (evpool *Pool) isPending(evidence types.Evidence) bool { return ok } -func (evpool *Pool) addPendingEvidence(evInfo *info) error { - evpb, err := evInfo.ToProto() +func (evpool *Pool) addPendingEvidence(ev types.Evidence) error { + evpb, err := types.EvidenceToProto(ev) if err != nil { return fmt.Errorf("unable to convert to proto, err: %w", err) } @@ -471,7 +348,7 @@ func (evpool *Pool) addPendingEvidence(evInfo *info) error { return fmt.Errorf("unable to marshal evidence: %w", err) } - key := keyPending(evInfo.Evidence) + key := keyPending(ev) err = evpool.evidenceStore.Set(key, evBytes) if err != nil { @@ -491,31 +368,80 @@ func (evpool *Pool) removePendingEvidence(evidence types.Evidence) { } } +// markEvidenceAsCommitted processes all the evidence in the block, marking it as +// committed and removing it from the pending database. 
+func (evpool *Pool) markEvidenceAsCommitted(evidence types.EvidenceList) { + blockEvidenceMap := make(map[string]struct{}, len(evidence)) + for _, ev := range evidence { + if evpool.isPending(ev) { + evpool.removePendingEvidence(ev) + blockEvidenceMap[evMapKey(ev)] = struct{}{} + } + + // Add evidence to the committed list. As the evidence is stored in the block store + // we only need to record the height that it was saved at. + key := keyCommitted(ev) + + h := gogotypes.Int64Value{Value: ev.Height()} + evBytes, err := proto.Marshal(&h) + if err != nil { + evpool.logger.Error("failed to marshal committed evidence", "err", err, "key(height/hash)", key) + continue + } + + if err := evpool.evidenceStore.Set(key, evBytes); err != nil { + evpool.logger.Error("Unable to save committed evidence", "err", err, "key(height/hash)", key) + } + } + + // remove committed evidence from the clist + if len(blockEvidenceMap) != 0 { + evpool.removeEvidenceFromList(blockEvidenceMap) + } +} + // listEvidence retrieves lists evidence from oldest to newest within maxBytes. // If maxBytes is -1, there's no cap on the size of returned evidence. 
func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Evidence, int64, error) { - var totalSize int64 - var evidence []types.Evidence + var ( + evSize int64 + totalSize int64 + evidence []types.Evidence + evList tmproto.EvidenceList // used for calculating the bytes size + ) + iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{prefixKey}) if err != nil { return nil, totalSize, fmt.Errorf("database error: %v", err) } defer iter.Close() for ; iter.Valid(); iter.Next() { - evInfo, err := bytesToInfo(iter.Value()) + var evpb tmproto.Evidence + err := evpb.Unmarshal(iter.Value()) if err != nil { - return nil, totalSize, err + return evidence, totalSize, err + } + evList.Evidence = append(evList.Evidence, evpb) + evSize = int64(evList.Size()) + if maxBytes != -1 && evSize > maxBytes { + if err := iter.Error(); err != nil { + return evidence, totalSize, err + } + return evidence, totalSize, nil } - totalSize += evInfo.ByteSize - - if maxBytes != -1 && totalSize > maxBytes { - return evidence, totalSize - evInfo.ByteSize, nil + ev, err := types.EvidenceFromProto(&evpb) + if err != nil { + return nil, totalSize, err } - evidence = append(evidence, evInfo.Evidence) + totalSize = evSize + evidence = append(evidence, ev) } + if err := iter.Error(); err != nil { + return evidence, totalSize, err + } return evidence, totalSize, nil } @@ -528,22 +454,22 @@ func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) { defer iter.Close() blockEvidenceMap := make(map[string]struct{}) for ; iter.Valid(); iter.Next() { - evInfo, err := bytesToInfo(iter.Value()) + ev, err := bytesToEv(iter.Value()) if err != nil { evpool.logger.Error("Error in transition evidence from protobuf", "err", err) continue } - if !evpool.isExpired(evInfo.Evidence.Height(), evInfo.Time) { + if !evpool.isExpired(ev.Height(), ev.Time()) { if len(blockEvidenceMap) != 0 { evpool.removeEvidenceFromList(blockEvidenceMap) } // return the height and time with which this evidence 
will have expired so we know when to prune next - return evInfo.Evidence.Height() + evpool.State().ConsensusParams.Evidence.MaxAgeNumBlocks + 1, - evInfo.Time.Add(evpool.State().ConsensusParams.Evidence.MaxAgeDuration).Add(time.Second) + return ev.Height() + evpool.State().ConsensusParams.Evidence.MaxAgeNumBlocks + 1, + ev.Time().Add(evpool.State().ConsensusParams.Evidence.MaxAgeDuration).Add(time.Second) } - evpool.removePendingEvidence(evInfo.Evidence) - blockEvidenceMap[evMapKey(evInfo.Evidence)] = struct{}{} + evpool.removePendingEvidence(ev) + blockEvidenceMap[evMapKey(ev)] = struct{}{} } // We either have no pending evidence or all evidence has expired if len(blockEvidenceMap) != 0 { @@ -571,14 +497,14 @@ func (evpool *Pool) updateState(state sm.State) { evpool.state = state } -func bytesToInfo(evBytes []byte) (info, error) { - var evpb evproto.Info +func bytesToEv(evBytes []byte) (types.Evidence, error) { + var evpb tmproto.Evidence err := evpb.Unmarshal(evBytes) if err != nil { - return info{}, err + return &types.DuplicateVoteEvidence{}, err } - return infoFromProto(&evpb) + return types.EvidenceFromProto(&evpb) } func evMapKey(ev types.Evidence) string { diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 1eea24d764..0dfdb4ebbd 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -11,7 +11,6 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/lazyledger/lazyledger-core/abci/types" "github.com/lazyledger/lazyledger-core/evidence" "github.com/lazyledger/lazyledger-core/evidence/mocks" "github.com/lazyledger/lazyledger-core/libs/log" @@ -45,7 +44,7 @@ func TestEvidencePoolBasic(t *testing.T) { blockStore = &mocks.BlockStore{} ) - valSet, privVals := types.RandValidatorSet(3, 10) + valSet, privVals := types.RandValidatorSet(1, 10) blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}, @@ -83,12 +82,13 
@@ func TestEvidencePoolBasic(t *testing.T) { next := pool.EvidenceFront() assert.Equal(t, ev, next.Value.(types.Evidence)) - evs, size = pool.PendingEvidence(defaultEvidenceMaxBytes) + const evidenceBytes int64 = 372 + evs, size = pool.PendingEvidence(evidenceBytes) assert.Equal(t, 1, len(evs)) - assert.Equal(t, int64(357), size) // check that the size of the single evidence in bytes is correct + assert.Equal(t, evidenceBytes, size) // check that the size of the single evidence in bytes is correct // shouldn't be able to add evidence twice - assert.Error(t, pool.AddEvidence(ev)) + assert.NoError(t, pool.AddEvidence(ev)) evs, _ = pool.PendingEvidence(defaultEvidenceMaxBytes) assert.Equal(t, 1, len(evs)) @@ -108,7 +108,7 @@ func TestAddExpiredEvidence(t *testing.T) { blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta { if h == height || h == expiredHeight { - return &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime.Add(time.Duration(height) * time.Minute)}} + return &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}} } return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}} }) @@ -127,6 +127,7 @@ func TestAddExpiredEvidence(t *testing.T) { {height - 1, expiredEvidenceTime, false, "valid evidence (despite old time)"}, {expiredHeight - 1, expiredEvidenceTime, true, "evidence from height 1 (created at: 2019-01-01 00:00:00 +0000 UTC) is too old"}, + {height, defaultEvidenceTime.Add(1 * time.Minute), true, "evidence time and block time is different"}, } for _, tc := range testCases { @@ -147,17 +148,16 @@ func TestAddEvidenceFromConsensus(t *testing.T) { var height int64 = 10 pool, val := defaultTestPool(height) ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, val, evidenceChainID) - err := pool.AddEvidenceFromConsensus(ev, defaultEvidenceTime, - types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(2)})) + err := 
pool.AddEvidenceFromConsensus(ev) assert.NoError(t, err) next := pool.EvidenceFront() assert.Equal(t, ev, next.Value.(types.Evidence)) + // shouldn't be able to submit the same evidence twice - err = pool.AddEvidenceFromConsensus(ev, defaultEvidenceTime.Add(-1*time.Second), - types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(3)})) - if assert.Error(t, err) { - assert.Equal(t, "evidence already verified and added", err.Error()) - } + err = pool.AddEvidenceFromConsensus(ev) + assert.NoError(t, err) + evs, _ := pool.PendingEvidence(defaultEvidenceMaxBytes) + assert.Equal(t, 1, len(evs)) } func TestEvidencePoolUpdate(t *testing.T) { @@ -166,11 +166,12 @@ func TestEvidencePoolUpdate(t *testing.T) { state := pool.State() // create new block (no need to save it to blockStore) - prunedEv := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultEvidenceTime, + prunedEv := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultEvidenceTime.Add(1*time.Minute), val, evidenceChainID) err := pool.AddEvidence(prunedEv) require.NoError(t, err) - ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, val, evidenceChainID) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime.Add(21*time.Minute), + val, evidenceChainID) lastCommit := makeCommit(height, val.PrivKey.PubKey().Address()) block := types.MakeBlock(height+1, []types.Tx{}, []types.Evidence{ev}, nil, types.Messages{}, lastCommit) // update state (partially) @@ -179,22 +180,7 @@ func TestEvidencePoolUpdate(t *testing.T) { err = pool.CheckEvidence(types.EvidenceList{ev}) require.NoError(t, err) - byzVals := pool.ABCIEvidence(block.Height, block.Evidence.Evidence) - expectedByzVals := []abci.Evidence{ - { - Type: abci.EvidenceType_DUPLICATE_VOTE, - Validator: types.TM2PB.Validator(val.ExtractIntoValidator(10)), - Height: height, - Time: defaultEvidenceTime.Add(time.Duration(height) * time.Minute), - TotalVotingPower: 10, - }, - } - assert.Equal(t, 
expectedByzVals, byzVals) - evList, _ := pool.PendingEvidence(defaultEvidenceMaxBytes) - assert.Equal(t, 1, len(evList)) - - pool.Update(state) - + pool.Update(state, block.Evidence.Evidence) // a) Update marks evidence as committed so pending evidence should be empty evList, evSize := pool.PendingEvidence(defaultEvidenceMaxBytes) assert.Empty(t, evList) @@ -205,14 +191,13 @@ func TestEvidencePoolUpdate(t *testing.T) { if assert.Error(t, err) { assert.Equal(t, "evidence was already committed", err.(*types.ErrInvalidEvidence).Reason.Error()) } - - assert.Empty(t, pool.ABCIEvidence(height, []types.Evidence{})) } func TestVerifyPendingEvidencePasses(t *testing.T) { var height int64 = 1 pool, val := defaultTestPool(height) - ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, val, evidenceChainID) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime.Add(1*time.Minute), + val, evidenceChainID) err := pool.AddEvidence(ev) require.NoError(t, err) @@ -223,20 +208,27 @@ func TestVerifyPendingEvidencePasses(t *testing.T) { func TestVerifyDuplicatedEvidenceFails(t *testing.T) { var height int64 = 1 pool, val := defaultTestPool(height) - ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, val, evidenceChainID) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime.Add(1*time.Minute), + val, evidenceChainID) err := pool.CheckEvidence(types.EvidenceList{ev, ev}) if assert.Error(t, err) { assert.Equal(t, "duplicate evidence", err.(*types.ErrInvalidEvidence).Reason.Error()) } } -// check that +// check that valid light client evidence is correctly validated and stored in +// evidence pool func TestCheckEvidenceWithLightClientAttack(t *testing.T) { - nValidators := 5 - conflictingVals, conflictingPrivVals := types.RandValidatorSet(nValidators, 10) - trustedHeader := makeHeaderRandom(10) + var ( + nValidators = 5 + validatorPower int64 = 10 + height int64 = 10 + 
) + conflictingVals, conflictingPrivVals := types.RandValidatorSet(nValidators, validatorPower) + trustedHeader := makeHeaderRandom(height) + trustedHeader.Time = defaultEvidenceTime - conflictingHeader := makeHeaderRandom(10) + conflictingHeader := makeHeaderRandom(height) conflictingHeader.ValidatorsHash = conflictingVals.Hash() trustedHeader.ValidatorsHash = conflictingHeader.ValidatorsHash @@ -248,8 +240,8 @@ func TestCheckEvidenceWithLightClientAttack(t *testing.T) { // for simplicity we are simulating a duplicate vote attack where all the validators in the // conflictingVals set voted twice blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) - commit, err := types.MakeCommit(blockID, 10, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) + voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + commit, err := types.MakeCommit(blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ @@ -259,12 +251,16 @@ func TestCheckEvidenceWithLightClientAttack(t *testing.T) { }, ValidatorSet: conflictingVals, }, - CommonHeight: 10, + CommonHeight: 10, + TotalVotingPower: int64(nValidators) * validatorPower, + ByzantineValidators: conflictingVals.Validators, + Timestamp: defaultEvidenceTime, } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) - trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) - trustedCommit, err := types.MakeCommit(trustedBlockID, 10, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) + trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + trustedCommit, err := types.MakeCommit(trustedBlockID, height, 1, trustedVoteSet, 
conflictingPrivVals, + defaultEvidenceTime) require.NoError(t, err) state := sm.State{ @@ -273,11 +269,11 @@ func TestCheckEvidenceWithLightClientAttack(t *testing.T) { ConsensusParams: *types.DefaultConsensusParams(), } stateStore := &smmocks.Store{} - stateStore.On("LoadValidators", int64(10)).Return(conflictingVals, nil) + stateStore.On("LoadValidators", height).Return(conflictingVals, nil) stateStore.On("Load").Return(state, nil) blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) - blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) + blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trustedHeader}) + blockStore.On("LoadBlockCommit", height).Return(trustedCommit) pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) require.NoError(t, err) @@ -290,17 +286,14 @@ func TestCheckEvidenceWithLightClientAttack(t *testing.T) { assert.NoError(t, err) // take away the last signature -> there are less validators then what we have detected, - // hence we move to full verification where the evidence should still pass + // hence this should fail commit.Signatures = append(commit.Signatures[:nValidators-1], types.NewCommitSigAbsent()) err = pool.CheckEvidence(types.EvidenceList{ev}) - assert.NoError(t, err) - - // take away the last two signatures -> should fail due to insufficient power - commit.Signatures = append(commit.Signatures[:nValidators-2], types.NewCommitSigAbsent(), types.NewCommitSigAbsent()) - err = pool.CheckEvidence(types.EvidenceList{ev}) assert.Error(t, err) } +// Tests that restarting the evidence pool after a potential failure will recover the +// pending evidence and continue to gossip it func TestRecoverPendingEvidence(t *testing.T) { height := int64(10) val := types.NewMockPV() @@ -315,9 +308,9 @@ func TestRecoverPendingEvidence(t *testing.T) { require.NoError(t, err) pool.SetLogger(log.TestingLogger()) goodEvidence := 
types.NewMockDuplicateVoteEvidenceWithValidator(height, - defaultEvidenceTime, val, evidenceChainID) + defaultEvidenceTime.Add(10*time.Minute), val, evidenceChainID) expiredEvidence := types.NewMockDuplicateVoteEvidenceWithValidator(int64(1), - defaultEvidenceTime, val, evidenceChainID) + defaultEvidenceTime.Add(1*time.Minute), val, evidenceChainID) err = pool.AddEvidence(goodEvidence) require.NoError(t, err) err = pool.AddEvidence(expiredEvidence) diff --git a/evidence/reactor.go b/evidence/reactor.go index 33da6cd60a..814b978118 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -7,7 +7,6 @@ import ( clist "github.com/lazyledger/lazyledger-core/libs/clist" "github.com/lazyledger/lazyledger-core/libs/log" "github.com/lazyledger/lazyledger-core/p2p" - ep "github.com/lazyledger/lazyledger-core/proto/tendermint/evidence" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" "github.com/lazyledger/lazyledger-core/types" ) @@ -17,8 +16,13 @@ const ( maxMsgSize = 1048576 // 1MB TODO make it configurable - broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often - peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount + // broadcast all uncommitted evidence this often. This sets when the reactor + // goes back to the start of the list and begins sending the evidence again. + // Most evidence should be committed in the very next block that is why we wait + // just over the block production rate before sending evidence again. + broadcastEvidenceIntervalS = 10 + // If a message fails wait this much before sending it again + peerRetryMessageIntervalMS = 100 ) // Reactor handles evpool evidence broadcasting amongst peers. @@ -62,10 +66,12 @@ func (evR *Reactor) AddPeer(peer p2p.Peer) { // Receive implements Reactor. // It adds any received evidence to the evpool. +// XXX: do not call any methods that can block or incur heavy processing. 
+// https://github.com/tendermint/tendermint/issues/2888 func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { evis, err := decodeMsg(msgBytes) if err != nil { - evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err, "bytes", msgBytes) + evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) evR.Switch.StopPeerForError(src, err) return } @@ -117,20 +123,18 @@ func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) { } ev := next.Value.(types.Evidence) - evis, retry := evR.checkSendEvidenceMessage(peer, ev) + evis := evR.prepareEvidenceMessage(peer, ev) if len(evis) > 0 { msgBytes, err := encodeMsg(evis) if err != nil { panic(err) } - + evR.Logger.Debug("Gossiping evidence to peer", "ev", ev, "peer", peer.ID()) success := peer.Send(EvidenceChannel, msgBytes) - retry = !success - } - - if retry { - time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) - continue + if !success { + time.Sleep(peerRetryMessageIntervalMS * time.Millisecond) + continue + } } afterCh := time.After(time.Second * broadcastEvidenceIntervalS) @@ -150,12 +154,12 @@ func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) { } } -// Returns the message to send the peer, or nil if the evidence is invalid for the peer. -// If message is nil, return true if we should sleep and try again. -func (evR Reactor) checkSendEvidenceMessage( +// Returns the message to send to the peer, or nil if the evidence is invalid for the peer. +// If message is nil, we should sleep and try again. +func (evR Reactor) prepareEvidenceMessage( peer p2p.Peer, ev types.Evidence, -) (evis []types.Evidence, retry bool) { +) (evis []types.Evidence) { // make sure the peer is up to date evHeight := ev.Height() @@ -166,7 +170,7 @@ func (evR Reactor) checkSendEvidenceMessage( // different every time due to us using a map. Sometimes other reactors // will be initialized before the consensus reactor. 
We should wait a few // milliseconds and retry. - return nil, true + return nil } // NOTE: We only send evidence to peers where @@ -177,8 +181,8 @@ func (evR Reactor) checkSendEvidenceMessage( ageNumBlocks = peerHeight - evHeight ) - if peerHeight < evHeight { // peer is behind. sleep while he catches up - return nil, true + if peerHeight <= evHeight { // peer is behind. sleep while he catches up + return nil } else if ageNumBlocks > params.MaxAgeNumBlocks { // evidence is too old relative to the peer, skip // NOTE: if evidence is too old for an honest peer, then we're behind and @@ -192,11 +196,11 @@ func (evR Reactor) checkSendEvidenceMessage( "peer", peer, ) - return nil, false + return nil } // send evidence - return []types.Evidence{ev}, false + return []types.Evidence{ev} } // PeerState describes the state of a peer. @@ -207,16 +211,15 @@ type PeerState interface { // encodemsg takes a array of evidence // returns the byte encoding of the List Message func encodeMsg(evis []types.Evidence) ([]byte, error) { - evi := make([]*tmproto.Evidence, len(evis)) + evi := make([]tmproto.Evidence, len(evis)) for i := 0; i < len(evis); i++ { ev, err := types.EvidenceToProto(evis[i]) if err != nil { return nil, err } - evi[i] = ev + evi[i] = *ev } - - epl := ep.List{ + epl := tmproto.EvidenceList{ Evidence: evi, } @@ -226,14 +229,14 @@ func encodeMsg(evis []types.Evidence) ([]byte, error) { // decodemsg takes an array of bytes // returns an array of evidence func decodeMsg(bz []byte) (evis []types.Evidence, err error) { - lm := ep.List{} + lm := tmproto.EvidenceList{} if err := lm.Unmarshal(bz); err != nil { return nil, err } evis = make([]types.Evidence, len(lm.Evidence)) for i := 0; i < len(lm.Evidence); i++ { - ev, err := types.EvidenceFromProto(lm.Evidence[i]) + ev, err := types.EvidenceFromProto(&lm.Evidence[i]) if err != nil { return nil, err } diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index edd4a78903..f8952fc89f 100644 --- 
a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -21,12 +21,170 @@ import ( "github.com/lazyledger/lazyledger-core/evidence/mocks" "github.com/lazyledger/lazyledger-core/libs/log" "github.com/lazyledger/lazyledger-core/p2p" - ep "github.com/lazyledger/lazyledger-core/proto/tendermint/evidence" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" sm "github.com/lazyledger/lazyledger-core/state" "github.com/lazyledger/lazyledger-core/types" ) +var ( + numEvidence = 10 + timeout = 120 * time.Second // ridiculously high because CircleCI is slow +) + +// We have N evidence reactors connected to one another. The first reactor +// receives a number of evidence at varying heights. We test that all +// other reactors receive the evidence and add it to their own respective +// evidence pools. +func TestReactorBroadcastEvidence(t *testing.T) { + config := cfg.TestConfig() + N := 7 + + // create statedb for everyone + stateDBs := make([]sm.Store, N) + val := types.NewMockPV() + // we need validators saved for heights at least as high as we have evidence for + height := int64(numEvidence) + 10 + for i := 0; i < N; i++ { + stateDBs[i] = initializeValidatorState(val, height) + } + + // make reactors from statedb + reactors, pools := makeAndConnectReactorsAndPools(config, stateDBs) + + // set the peer height on each reactor + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + ps := peerState{height} + peer.Set(types.PeerStateKey, ps) + } + } + + // send a bunch of valid evidence to the first reactor's evpool + // and wait for them all to be received in the others + evList := sendEvidence(t, pools[0], val, numEvidence) + waitForEvidence(t, evList, pools) +} + +// We have two evidence reactors connected to one another but are at different heights. +// Reactor 1 which is ahead receives a number of evidence. 
It should only send the evidence +// that is below the height of the peer to that peer. +func TestReactorSelectiveBroadcast(t *testing.T) { + config := cfg.TestConfig() + + val := types.NewMockPV() + height1 := int64(numEvidence) + 10 + height2 := int64(numEvidence) / 2 + + // DB1 is ahead of DB2 + stateDB1 := initializeValidatorState(val, height1) + stateDB2 := initializeValidatorState(val, height2) + + // make reactors from statedb + reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2}) + + // set the peer height on each reactor + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + ps := peerState{height1} + peer.Set(types.PeerStateKey, ps) + } + } + + // update the first reactor peer's height to be very small + peer := reactors[0].Switch.Peers().List()[0] + ps := peerState{height2} + peer.Set(types.PeerStateKey, ps) + + // send a bunch of valid evidence to the first reactor's evpool + evList := sendEvidence(t, pools[0], val, numEvidence) + + // only ones less than the peers height should make it through + waitForEvidence(t, evList[:numEvidence/2-1], []*evidence.Pool{pools[1]}) + + // peers should still be connected + peers := reactors[1].Switch.Peers().List() + assert.Equal(t, 1, len(peers)) +} + +// This tests aims to ensure that reactors don't send evidence that they have committed or that ar +// not ready for the peer through three scenarios. 
+// First, committed evidence to a newly connected peer +// Second, evidence to a peer that is behind +// Third, evidence that was pending and became committed just before the peer caught up +func TestReactorsGossipNoCommittedEvidence(t *testing.T) { + config := cfg.TestConfig() + + val := types.NewMockPV() + var height int64 = 10 + + // DB1 is ahead of DB2 + stateDB1 := initializeValidatorState(val, height-1) + stateDB2 := initializeValidatorState(val, height-2) + state, err := stateDB1.Load() + require.NoError(t, err) + state.LastBlockHeight++ + + // make reactors from statedb + reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2}) + + evList := sendEvidence(t, pools[0], val, 2) + pools[0].Update(state, evList) + require.EqualValues(t, uint32(0), pools[0].Size()) + + time.Sleep(100 * time.Millisecond) + + peer := reactors[0].Switch.Peers().List()[0] + ps := peerState{height - 2} + peer.Set(types.PeerStateKey, ps) + + peer = reactors[1].Switch.Peers().List()[0] + ps = peerState{height} + peer.Set(types.PeerStateKey, ps) + + // wait to see that no evidence comes through + time.Sleep(300 * time.Millisecond) + + // the second pool should not have received any evidence because it has already been committed + assert.Equal(t, uint32(0), pools[1].Size(), "second reactor should not have received evidence") + + // the first reactor receives three more evidence + evList = make([]types.Evidence, 3) + for i := 0; i < 3; i++ { + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height-3+int64(i), + time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, state.ChainID) + err := pools[0].AddEvidence(ev) + require.NoError(t, err) + evList[i] = ev + } + + // wait to see that only one evidence is sent + time.Sleep(300 * time.Millisecond) + + // the second pool should only have received the first evidence because it is behind + peerEv, _ := pools[1].PendingEvidence(10000) + assert.EqualValues(t, []types.Evidence{evList[0]}, peerEv) + + // the last 
evidence is committed and the second reactor catches up in state to the first + // reactor. We therefore expect that the second reactor only receives one more evidence, the + // one that is still pending and not the evidence that has already been committed. + state.LastBlockHeight++ + pools[0].Update(state, []types.Evidence{evList[2]}) + // the first reactor should have the two remaining pending evidence + require.EqualValues(t, uint32(2), pools[0].Size()) + + // now update the state of the second reactor + pools[1].Update(state, types.EvidenceList{}) + peer = reactors[0].Switch.Peers().List()[0] + ps = peerState{height} + peer.Set(types.PeerStateKey, ps) + + // wait to see that only two evidence is sent + time.Sleep(300 * time.Millisecond) + + peerEv, _ = pools[1].PendingEvidence(1000) + assert.EqualValues(t, []types.Evidence{evList[0], evList[1]}, peerEv) +} + // evidenceLogger is a TestingLogger which uses a different // color for each validator ("validator" key must exist). func evidenceLogger() log.Logger { @@ -141,41 +299,6 @@ func sendEvidence(t *testing.T, evpool *evidence.Pool, val types.PrivValidator, return evList } -var ( - numEvidence = 10 - timeout = 120 * time.Second // ridiculously high because CircleCI is slow -) - -func TestReactorBroadcastEvidence(t *testing.T) { - config := cfg.TestConfig() - N := 7 - - // create statedb for everyone - stateDBs := make([]sm.Store, N) - val := types.NewMockPV() - // we need validators saved for heights at least as high as we have evidence for - height := int64(numEvidence) + 10 - for i := 0; i < N; i++ { - stateDBs[i] = initializeValidatorState(val, height) - } - - // make reactors from statedb - reactors, pools := makeAndConnectReactorsAndPools(config, stateDBs) - - // set the peer height on each reactor - for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { - ps := peerState{height} - peer.Set(types.PeerStateKey, ps) - } - } - - // send a bunch of valid evidence to the first reactor's 
evpool - // and wait for them all to be received in the others - evList := sendEvidence(t, pools[0], val, numEvidence) - waitForEvidence(t, evList, pools) -} - type peerState struct { height int64 } @@ -184,44 +307,6 @@ func (ps peerState) GetHeight() int64 { return ps.height } -func TestReactorSelectiveBroadcast(t *testing.T) { - config := cfg.TestConfig() - - val := types.NewMockPV() - height1 := int64(numEvidence) + 10 - height2 := int64(numEvidence) / 2 - - // DB1 is ahead of DB2 - stateDB1 := initializeValidatorState(val, height1) - stateDB2 := initializeValidatorState(val, height2) - - // make reactors from statedb - reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2}) - - // set the peer height on each reactor - for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { - ps := peerState{height1} - peer.Set(types.PeerStateKey, ps) - } - } - - // update the first reactor peer's height to be very small - peer := reactors[0].Switch.Peers().List()[0] - ps := peerState{height2} - peer.Set(types.PeerStateKey, ps) - - // send a bunch of valid evidence to the first reactor's evpool - evList := sendEvidence(t, pools[0], val, numEvidence) - - // only ones less than the peers height should make it through - waitForEvidence(t, evList[:numEvidence/2], pools[1:2]) - - // peers should still be connected - peers := reactors[1].Switch.Peers().List() - assert.Equal(t, 1, len(peers)) -} - func exampleVote(t byte) *types.Vote { var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z") if err != nil { @@ -248,27 +333,39 @@ func exampleVote(t byte) *types.Vote { // nolint:lll //ignore line length for tests func TestEvidenceVectors(t *testing.T) { - dupl := types.NewDuplicateVoteEvidence(exampleVote(1), exampleVote(2)) + val := &types.Validator{ + Address: crypto.AddressHash([]byte("validator_address")), + VotingPower: 10, + } + + valSet := types.NewValidatorSet([]*types.Validator{val}) + + dupl := 
types.NewDuplicateVoteEvidence( + exampleVote(1), + exampleVote(2), + defaultEvidenceTime, + valSet, + ) testCases := []struct { testName string evidenceList []types.Evidence expBytes string }{ - {"DuplicateVoteEvidence", []types.Evidence{dupl}, "0af9010af6010a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, + {"DuplicateVoteEvidence", []types.Evidence{dupl}, "0a85020a82020a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03180a200a2a060880dbaae105"}, } for _, tc := range testCases { tc := tc - evi := make([]*tmproto.Evidence, len(tc.evidenceList)) + evi := make([]tmproto.Evidence, len(tc.evidenceList)) for i := 0; i < len(tc.evidenceList); i++ { ev, err := types.EvidenceToProto(tc.evidenceList[i]) require.NoError(t, err, tc.testName) - evi[i] = ev + evi[i] = *ev } - epl := ep.List{ + epl := tmproto.EvidenceList{ Evidence: evi, } diff --git a/evidence/verify.go b/evidence/verify.go index 055895ac24..11d0b18df8 100644 --- a/evidence/verify.go +++ b/evidence/verify.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "fmt" + "sort" "time" "github.com/lazyledger/lazyledger-core/light" @@ -16,7 +17,7 
@@ import ( // - it is from a key who was a validator at the given height // - it is internally consistent with state // - it was properly signed by the alleged equivocator and meets the individual evidence verification requirements -func (evpool *Pool) verify(evidence types.Evidence) (*info, error) { +func (evpool *Pool) verify(evidence types.Evidence) error { var ( state = evpool.State() height = state.LastBlockHeight @@ -24,22 +25,21 @@ func (evpool *Pool) verify(evidence types.Evidence) (*info, error) { ageNumBlocks = height - evidence.Height() ) - // check that the evidence isn't already committed - if evpool.isCommitted(evidence) { - return nil, errors.New("evidence was already committed") - } - // verify the time of the evidence blockMeta := evpool.blockStore.LoadBlockMeta(evidence.Height()) if blockMeta == nil { - return nil, fmt.Errorf("don't have header at height #%d", evidence.Height()) + return fmt.Errorf("don't have header #%d", evidence.Height()) } evTime := blockMeta.Header.Time + if evidence.Time() != evTime { + return fmt.Errorf("evidence has a different time to the block it is associated with (%v != %v)", + evidence.Time(), evTime) + } ageDuration := state.LastBlockTime.Sub(evTime) // check that the evidence hasn't expired if ageDuration > evidenceParams.MaxAgeDuration && ageNumBlocks > evidenceParams.MaxAgeNumBlocks { - return nil, fmt.Errorf( + return fmt.Errorf( "evidence from height %d (created at: %v) is too old; min height is %d and evidence can not be older than %v", evidence.Height(), evTime, @@ -53,62 +53,66 @@ func (evpool *Pool) verify(evidence types.Evidence) (*info, error) { case *types.DuplicateVoteEvidence: valSet, err := evpool.stateDB.LoadValidators(evidence.Height()) if err != nil { - return nil, err - } - err = VerifyDuplicateVote(ev, state.ChainID, valSet) - if err != nil { - return nil, fmt.Errorf("verifying duplicate vote evidence: %w", err) + return err } - - _, val := valSet.GetByAddress(ev.VoteA.ValidatorAddress) - - 
return &info{ - Evidence: evidence, - Time: evTime, - Validators: []*types.Validator{val}, // just a single validator for duplicate vote evidence - TotalVotingPower: valSet.TotalVotingPower(), - }, nil + return VerifyDuplicateVote(ev, state.ChainID, valSet) case *types.LightClientAttackEvidence: commonHeader, err := getSignedHeader(evpool.blockStore, evidence.Height()) if err != nil { - return nil, err + return err } commonVals, err := evpool.stateDB.LoadValidators(evidence.Height()) if err != nil { - return nil, err + return err } trustedHeader := commonHeader // in the case of lunatic the trusted header is different to the common header if evidence.Height() != ev.ConflictingBlock.Height { trustedHeader, err = getSignedHeader(evpool.blockStore, ev.ConflictingBlock.Height) if err != nil { - return nil, err + return err } } err = VerifyLightClientAttack(ev, commonHeader, trustedHeader, commonVals, state.LastBlockTime, state.ConsensusParams.Evidence.MaxAgeDuration) if err != nil { - return nil, err + return err } // find out what type of attack this was and thus extract the malicious validators. Note in the case of an // Amnesia attack we don't have any malicious validators. - validators, attackType := getMaliciousValidators(ev, commonVals, trustedHeader) - totalVotingPower := ev.ConflictingBlock.ValidatorSet.TotalVotingPower() - if attackType == lunaticType { - totalVotingPower = commonVals.TotalVotingPower() + validators := ev.GetByzantineValidators(commonVals, trustedHeader) + // ensure this matches the validators that are listed in the evidence. They should be ordered based on power. 
+ if validators == nil && ev.ByzantineValidators != nil { + return fmt.Errorf("expected nil validators from an amnesia light client attack but got %d", + len(ev.ByzantineValidators)) + } + + if exp, got := len(validators), len(ev.ByzantineValidators); exp != got { + return fmt.Errorf("expected %d byzantine validators from evidence but got %d", + exp, got) } - return &info{ - Evidence: evidence, - Time: evTime, - Validators: validators, - TotalVotingPower: totalVotingPower, - }, nil + // ensure that both validator arrays are in the same order + sort.Sort(types.ValidatorsByVotingPower(ev.ByzantineValidators)) + + for idx, val := range validators { + if !bytes.Equal(ev.ByzantineValidators[idx].Address, val.Address) { + return fmt.Errorf("evidence contained a different byzantine validator address to the one we were expecting."+ + "Expected %v, got %v", val.Address, ev.ByzantineValidators[idx].Address) + } + if ev.ByzantineValidators[idx].VotingPower != val.VotingPower { + return fmt.Errorf("evidence contained a byzantine validator with a different power to the one we were expecting."+ + "Expected %d, got %d", val.VotingPower, ev.ByzantineValidators[idx].VotingPower) + } + } + + return nil default: - return nil, fmt.Errorf("unrecognized evidence type: %T", evidence) + return fmt.Errorf("unrecognized evidence type: %T", evidence) } + } // VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. 
This involves @@ -139,8 +143,13 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t } } + if evTotal, valsTotal := e.TotalVotingPower, commonVals.TotalVotingPower(); evTotal != valsTotal { + return fmt.Errorf("total voting power from the evidence and our validator set does not match (%d != %d)", + evTotal, valsTotal) + } + if bytes.Equal(trustedHeader.Hash(), e.ConflictingBlock.Hash()) { - return fmt.Errorf("trusted header hash matches the evidence conflicting header hash: %X", + return fmt.Errorf("trusted header hash matches the evidence's conflicting header hash: %X", trustedHeader.Hash()) } @@ -191,6 +200,17 @@ func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet return fmt.Errorf("address (%X) doesn't match pubkey (%v - %X)", addr, pubKey, pubKey.Address()) } + + // validator voting power and total voting power must match + if val.VotingPower != e.ValidatorPower { + return fmt.Errorf("validator power from evidence and our validator set does not match (%d != %d)", + e.ValidatorPower, val.VotingPower) + } + if valSet.TotalVotingPower() != e.TotalVotingPower { + return fmt.Errorf("total voting power from the evidence and our validator set does not match (%d != %d)", + e.TotalVotingPower, valSet.TotalVotingPower()) + } + va := e.VoteA.ToProto() vb := e.VoteB.ToProto() // Signatures must be valid @@ -219,55 +239,6 @@ func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader, }, nil } -// getMaliciousValidators finds out what style of attack LightClientAttackEvidence was and then works out who -// the malicious validators were and returns them. -func getMaliciousValidators(evidence *types.LightClientAttackEvidence, commonVals *types.ValidatorSet, - trusted *types.SignedHeader) ([]*types.Validator, lightClientAttackType) { - var validators []*types.Validator - // First check if the header is invalid. 
This means that it is a lunatic attack and therefore we take the - // validators who are in the commonVals and voted for the lunatic header - if isInvalidHeader(trusted.Header, evidence.ConflictingBlock.Header) { - for _, commitSig := range evidence.ConflictingBlock.Commit.Signatures { - if !commitSig.ForBlock() { - continue - } - - _, val := commonVals.GetByAddress(commitSig.ValidatorAddress) - if val == nil { - // validator wasn't in the common validator set - continue - } - validators = append(validators, val) - } - return validators, lunaticType - // Next, check to see if it is an equivocation attack and both commits are in the same round. If this is the - // case then we take the validators from the conflicting light block validator set that voted in both headers. - } else if trusted.Commit.Round == evidence.ConflictingBlock.Commit.Round { - // validator hashes are the same therefore the indexing order of validators are the same and thus we - // only need a single loop to find the validators that voted twice. - for i := 0; i < len(evidence.ConflictingBlock.Commit.Signatures); i++ { - sigA := evidence.ConflictingBlock.Commit.Signatures[i] - if sigA.Absent() { - continue - } - - sigB := trusted.Commit.Signatures[i] - if sigB.Absent() { - continue - } - - _, val := evidence.ConflictingBlock.ValidatorSet.GetByAddress(sigA.ValidatorAddress) - validators = append(validators, val) - } - return validators, equivocationType - - } - // if the rounds are different then this is an amnesia attack. Unfortunately, given the nature of the attack, - // we aren't able yet to deduce which are malicious validators and which are not hence we return an - // empty validator set. - return validators, amnesiaType -} - // isInvalidHeader takes a trusted header and matches it againt a conflicting header // to determine whether the conflicting header was the product of a valid state transition // or not. If it is then all the deterministic fields of the header should be the same. 
@@ -279,11 +250,3 @@ func isInvalidHeader(trusted, conflicting *types.Header) bool { !bytes.Equal(trusted.AppHash, conflicting.AppHash) || !bytes.Equal(trusted.LastResultsHash, conflicting.LastResultsHash) } - -type lightClientAttackType int - -const ( - lunaticType lightClientAttackType = iota + 1 - equivocationType - amnesiaType -) diff --git a/evidence/verify_test.go b/evidence/verify_test.go index f9f1cd99ed..f1f5362ff9 100644 --- a/evidence/verify_test.go +++ b/evidence/verify_test.go @@ -9,7 +9,6 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/lazyledger/lazyledger-core/abci/types" "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/tmhash" "github.com/lazyledger/lazyledger-core/evidence" @@ -33,14 +32,14 @@ func TestVerifyLightClientAttack_Lunatic(t *testing.T) { conflictingPrivVals := append(commonPrivVals, newPrivVal) commonHeader := makeHeaderRandom(4) - commonHeader.Time = defaultEvidenceTime.Add(-1 * time.Hour) + commonHeader.Time = defaultEvidenceTime trustedHeader := makeHeaderRandom(10) conflictingHeader := makeHeaderRandom(10) + conflictingHeader.Time = defaultEvidenceTime.Add(1 * time.Hour) conflictingHeader.ValidatorsHash = conflictingVals.Hash() - // we are simulating a duplicate vote attack where all the validators in the conflictingVals set - // vote twice + // we are simulating a lunatic light client attack blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) commit, err := types.MakeCommit(blockID, 10, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) @@ -53,7 +52,10 @@ func TestVerifyLightClientAttack_Lunatic(t *testing.T) { }, ValidatorSet: conflictingVals, }, - CommonHeight: 4, + CommonHeight: 4, + TotalVotingPower: 20, + ByzantineValidators: commonVals.Validators, + Timestamp: defaultEvidenceTime, } commonSignedHeader := 
&types.SignedHeader{ @@ -72,16 +74,23 @@ func TestVerifyLightClientAttack_Lunatic(t *testing.T) { // good pass -> no error err = evidence.VerifyLightClientAttack(ev, commonSignedHeader, trustedSignedHeader, commonVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) + defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) assert.NoError(t, err) // trusted and conflicting hashes are the same -> an error should be returned err = evidence.VerifyLightClientAttack(ev, commonSignedHeader, ev.ConflictingBlock.SignedHeader, commonVals, - defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) + defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) assert.Error(t, err) + // evidence with different total validator power should fail + ev.TotalVotingPower = 1 + err = evidence.VerifyLightClientAttack(ev, commonSignedHeader, trustedSignedHeader, commonVals, + defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) + assert.Error(t, err) + ev.TotalVotingPower = 20 + state := sm.State{ - LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), + LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour), LastBlockHeight: 11, ConsensusParams: *types.DefaultConsensusParams(), } @@ -105,27 +114,18 @@ func TestVerifyLightClientAttack_Lunatic(t *testing.T) { pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) assert.Equal(t, 1, len(pendingEvs)) - pubKey, err := newPrivVal.GetPubKey() - require.NoError(t, err) - lastCommit := makeCommit(state.LastBlockHeight, pubKey.Address()) - block := types.MakeBlock(state.LastBlockHeight, []types.Tx{}, []types.Evidence{ev}, nil, types.Messages{}, lastCommit) - - abciEv := pool.ABCIEvidence(block.Height, block.Evidence.Evidence) - expectedAbciEv := make([]abci.Evidence, len(commonVals.Validators)) - - // we expect evidence to be made for all validators in the common validator set - for idx, val := range commonVals.Validators { - ev := abci.Evidence{ - Type: abci.EvidenceType_LIGHT_CLIENT_ATTACK, - Validator: types.TM2PB.Validator(val), 
- Height: commonHeader.Height, - Time: commonHeader.Time, - TotalVotingPower: commonVals.TotalVotingPower(), - } - expectedAbciEv[idx] = ev - } + // if we submit evidence only against a single byzantine validator when we see there are more validators then this + // should return an error + ev.ByzantineValidators = []*types.Validator{commonVals.Validators[0]} + err = pool.CheckEvidence(evList) + assert.Error(t, err) + ev.ByzantineValidators = commonVals.Validators // restore evidence + + // If evidence is submitted with an altered timestamp it should return an error + ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute) + err = pool.CheckEvidence(evList) + assert.Error(t, err) - assert.Equal(t, expectedAbciEv, abciEv) } func TestVerifyLightClientAttack_Equivocation(t *testing.T) { @@ -155,7 +155,10 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { }, ValidatorSet: conflictingVals, }, - CommonHeight: 10, + CommonHeight: 10, + ByzantineValidators: conflictingVals.Validators[:4], + TotalVotingPower: 50, + Timestamp: defaultEvidenceTime, } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) @@ -168,12 +171,12 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { } // good pass -> no error - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, nil, + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) assert.NoError(t, err) // trusted and conflicting hashes are the same -> an error should be returned - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, nil, + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) assert.Error(t, err) @@ -208,31 +211,6 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { pendingEvs, 
_ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) assert.Equal(t, 1, len(pendingEvs)) - - pubKey, err := conflictingPrivVals[0].GetPubKey() - require.NoError(t, err) - lastCommit := makeCommit(state.LastBlockHeight, pubKey.Address()) - block := types.MakeBlock(state.LastBlockHeight, []types.Tx{}, []types.Evidence{ev}, nil, types.Messages{}, lastCommit) - - abciEv := pool.ABCIEvidence(block.Height, block.Evidence.Evidence) - expectedAbciEv := make([]abci.Evidence, len(conflictingVals.Validators)-1) - - // we expect evidence to be made for all validators except the last one - for idx, val := range conflictingVals.Validators { - if idx == 4 { // skip the last validator - continue - } - ev := abci.Evidence{ - Type: abci.EvidenceType_LIGHT_CLIENT_ATTACK, - Validator: types.TM2PB.Validator(val), - Height: ev.ConflictingBlock.Height, - Time: ev.ConflictingBlock.Time, - TotalVotingPower: ev.ConflictingBlock.ValidatorSet.TotalVotingPower(), - } - expectedAbciEv[idx] = ev - } - - assert.Equal(t, expectedAbciEv, abciEv) } func TestVerifyLightClientAttack_Amnesia(t *testing.T) { @@ -261,7 +239,10 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { }, ValidatorSet: conflictingVals, }, - CommonHeight: 10, + CommonHeight: 10, + ByzantineValidators: nil, // with amnesia evidence no validators are submitted as abci evidence + TotalVotingPower: 50, + Timestamp: defaultEvidenceTime, } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) @@ -274,12 +255,12 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { } // good pass -> no error - err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, nil, + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) assert.NoError(t, err) // trusted and conflicting hashes are the same -> an error should be returned - err = evidence.VerifyLightClientAttack(ev, 
trustedSignedHeader, ev.ConflictingBlock.SignedHeader, nil, + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) assert.Error(t, err) @@ -305,20 +286,6 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) assert.Equal(t, 1, len(pendingEvs)) - - pubKey, err := conflictingPrivVals[0].GetPubKey() - require.NoError(t, err) - lastCommit := makeCommit(state.LastBlockHeight, pubKey.Address()) - block := types.MakeBlock(state.LastBlockHeight, []types.Tx{}, []types.Evidence{ev}, nil, types.Messages{}, lastCommit) - - abciEv := pool.ABCIEvidence(block.Height, block.Evidence.Evidence) - // as we are unable to find out which subset of validators in the commit were malicious, no information - // is sent to the application. We expect the array to be empty - emptyEvidenceBlock := types.MakeBlock(state.LastBlockHeight, []types.Tx{}, - []types.Evidence{}, nil, types.Messages{}, lastCommit) - expectedAbciEv := pool.ABCIEvidence(emptyEvidenceBlock.Height, emptyEvidenceBlock.Evidence.Evidence) - - assert.Equal(t, expectedAbciEv, abciEv) } type voteData struct { @@ -369,8 +336,11 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { require.NoError(t, err) for _, c := range cases { ev := &types.DuplicateVoteEvidence{ - VoteA: c.vote1, - VoteB: c.vote2, + VoteA: c.vote1, + VoteB: c.vote2, + ValidatorPower: 1, + TotalVotingPower: 1, + Timestamp: defaultEvidenceTime, } if c.valid { assert.Nil(t, evidence.VerifyDuplicateVote(ev, chainID, valSet), "evidence should be valid") @@ -379,7 +349,14 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { } } + // create good evidence and correct validator power goodEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID) + goodEv.ValidatorPower = 1 + goodEv.TotalVotingPower = 1 + badEv := 
types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID) + badTimeEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime.Add(1*time.Minute), val, chainID) + badTimeEv.ValidatorPower = 1 + badTimeEv.TotalVotingPower = 1 state := sm.State{ ChainID: chainID, LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), @@ -398,6 +375,16 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { evList := types.EvidenceList{goodEv} err = pool.CheckEvidence(evList) assert.NoError(t, err) + + // evidence with a different validator power should fail + evList = types.EvidenceList{badEv} + err = pool.CheckEvidence(evList) + assert.Error(t, err) + + // evidence with a different timestamp should fail + evList = types.EvidenceList{badTimeEv} + err = pool.CheckEvidence(evList) + assert.Error(t, err) } func makeVote( diff --git a/go.mod b/go.mod index c9b86e2c08..fc492201e3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/lazyledger/lazyledger-core -go 1.14 +go 1.15 require ( github.com/BurntSushi/toml v0.3.1 @@ -8,8 +8,8 @@ require ( github.com/Workiva/go-datastructures v1.0.52 github.com/btcsuite/btcd v0.21.0-beta github.com/btcsuite/btcutil v1.0.2 - github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb - github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd + github.com/confio/ics23/go v0.6.3 + github.com/cosmos/iavl v0.15.0 github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.10.0 github.com/go-logfmt/logfmt v0.5.0 @@ -17,6 +17,7 @@ require ( github.com/golang/protobuf v1.4.3 github.com/gorilla/websocket v1.4.2 github.com/gtank/merlin v0.1.1 + github.com/hdevalence/ed25519consensus v0.0.0-20201207055737-7fde80a9d5ff github.com/lazyledger/nmt v0.0.0-20200908210531-1a5d9124ad10 github.com/lazyledger/rsmt2d v0.0.0-20200626141417-ea94438fa457 
github.com/libp2p/go-buffer-pool v0.0.2 @@ -27,12 +28,12 @@ require ( github.com/rs/cors v1.7.0 github.com/sasha-s/go-deadlock v0.2.0 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.1.0 + github.com/spf13/cobra v1.1.1 github.com/spf13/viper v1.7.1 github.com/stretchr/testify v1.6.1 - github.com/tendermint/tendermint v0.34.0-rc4 - github.com/tendermint/tm-db v0.6.2 - golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a - golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc - google.golang.org/grpc v1.32.0 + github.com/tendermint/tendermint v0.34.0 + github.com/tendermint/tm-db v0.6.3 + golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9 + golang.org/x/net v0.0.0-20200822124328-c89045814202 + google.golang.org/grpc v1.34.0 ) diff --git a/go.sum b/go.sum index d92e48edc7..76a9024376 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0-alpha.2 h1:EWbZLqGEPSIj2W69gx04KtNVkyPIfe3uj0DhDQJonbQ= +filippo.io/edwards25519 v1.0.0-alpha.2/go.mod h1:X+pm78QAUPtFLi1z9PYIlS/bdDnvbCOGKtZ+ACWEf7o= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -85,10 +87,13 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/clbanning/x2j 
v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb h1:+7FsS1gZ1Km5LRjGV2hztpier/5i6ngNjvNpxbWP5I0= github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= +github.com/confio/ics23/go v0.6.3 h1:PuGK2V1NJWZ8sSkNDq91jgT/cahFEW9RGp4Y5jxulf0= +github.com/confio/ics23/go v0.6.3/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -103,6 +108,10 @@ github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fj github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd h1:K3bmPkMDnd2KVQ7xoGmgp+pxoXcBW58vMWaMl9ZWx3c= github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd/go.mod h1:3xOIaNNX19p0QrX0VqWa6voPRoJRGGYtny+DH8NEPvE= +github.com/cosmos/iavl v0.15.0-rc5 
h1:AMKgaAjXwGANWv56NL4q4hV+a0puSkLYD6cCQAv3i44= +github.com/cosmos/iavl v0.15.0-rc5/go.mod h1:WqoPL9yPTQ85QBMT45OOUzPxG/U/JcJoN7uMjgxke/I= +github.com/cosmos/iavl v0.15.0 h1:MKIZlwRuls9mUXnMCtqHRDsiZXeevUvDcVrxa1cw+8A= +github.com/cosmos/iavl v0.15.0/go.mod h1:OLjQiAQ4fGD2KDZooyJG9yz+p2ao2IAYSbke8mVvSA4= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -114,6 +123,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dgraph-io/badger/v2 v2.2007.1 h1:t36VcBCpo4SsmAD5M8wVv1ieVzcALyGfaJ92z4ccULM= github.com/dgraph-io/badger/v2 v2.2007.1/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= +github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -131,6 +142,7 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= @@ -150,6 +162,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -187,6 +200,7 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= @@ -201,6 +215,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -209,6 +226,7 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 
h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -223,12 +241,16 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.1/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.7 h1:Nk5kuHrnWUTf/0GL1a/vchH/om9Ap2/HnVna+jYZgTY= github.com/grpc-ecosystem/grpc-gateway v1.14.7/go.mod h1:oYZKL012gGh6LMyg/xA7Q2yq6j8bu0wa+9w14EEthWU= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f h1:8N8XWLZelZNibkhM1FuF+3Ad3YIbgirjdMiVA0eUkaM= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -258,6 +280,11 @@ 
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hdevalence/ed25519consensus v0.0.0-20200813231810-1694d75e712a h1:H7I/CTwAupJEX4g8AesPYRKQY0wbGZxQBlg842dGK3k= +github.com/hdevalence/ed25519consensus v0.0.0-20200813231810-1694d75e712a/go.mod h1:V0zo781scjlo5OzNQb2GI8wMt6CD4vs7y1beXtxZEhM= +github.com/hdevalence/ed25519consensus v0.0.0-20201207055737-7fde80a9d5ff h1:LeVKjw8pcDQj7WVVnbFvbD7ovcv+r/l15ka1NH6Lswc= +github.com/hdevalence/ed25519consensus v0.0.0-20201207055737-7fde80a9d5ff/go.mod h1:Feit0l8NcNO4g69XNjwvsR0LGcwMMfzI1TF253rOIlQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -287,6 +314,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -355,6 +383,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= @@ -394,6 +423,7 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw= github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -409,6 +439,7 @@ github.com/prometheus/common v0.7.0/go.mod 
h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4= github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -457,8 +488,8 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.0 h1:aq3wCKjTPmzcNWLVGnsFVN4rflK7Uzn10F8/aw8MhdQ= -github.com/spf13/cobra v1.1.0/go.mod h1:yk5b0mALVusDL5fMM6Rd1wgnoO5jUPhwsQ6LQAJTidQ= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -488,10 +519,14 @@ github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D6 
github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/tendermint/tendermint v0.34.0-rc4 h1:fnPyDFz9QGAU6tjExoQ8ZY63eHkzdBg5StQgDoeuK0s= github.com/tendermint/tendermint v0.34.0-rc4/go.mod h1:yotsojf2C1QBOw4dZrTcxbyxmPUrT4hNuOQWX9XUwB4= +github.com/tendermint/tendermint v0.34.0-rc6/go.mod h1:ugzyZO5foutZImv0Iyx/gOFCX6mjJTgbLHTwi17VDVg= +github.com/tendermint/tendermint v0.34.0 h1:eXCfMgoqVSzrjzOj6clI9GAejcHH0LvOlRjpCmMJksU= +github.com/tendermint/tendermint v0.34.0/go.mod h1:Aj3PIipBFSNO21r+Lq3TtzQ+uKESxkbA3yo/INM4QwQ= github.com/tendermint/tm-db v0.6.2 h1:DOn8jwCdjJblrCFJbtonEIPD1IuJWpbRUUdR8GWE4RM= github.com/tendermint/tm-db v0.6.2/go.mod h1:GYtQ67SUvATOcoY8/+x6ylk8Qo02BQyLrAs+yAcLvGI= +github.com/tendermint/tm-db v0.6.3 h1:ZkhQcKnB8/2jr5EaZwGndN4owkPsGezW2fSisS9zGbg= +github.com/tendermint/tm-db v0.6.3/go.mod h1:lfA1dL9/Y/Y8wwyPp2NMLyn5P5Ptr/gvDFNWtrCWSf8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -537,9 +572,14 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9 h1:phUcVbl53swtrUN8kQEXFhUxPlIlWyBfKmidCu7P95o= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -549,7 +589,6 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5 h1:FR+oGxGfbQu1d+jglI3rCkjAjUnhRSZcUxr+DqlDLNo= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= 
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -594,10 +633,13 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -628,6 +670,7 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -637,9 +680,12 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed h1:J22ig1FUekjjkmZUM7pTKixYm8DvrYsvrBZdunYeIuQ= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -712,6 +758,10 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201111145450-ac7456db90a6 h1:iRN4+t0lvZX/l9gH14ARF9i58tsVa5a97k6aH95rC3Y= +google.golang.org/genproto v0.0.0-20201111145450-ac7456db90a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201119123407-9b1e624d6bc4 h1:Rt0FRalMgdSlXAVJvX4pr65KfqaxHXSLkSJRD9pw6g0= +google.golang.org/genproto v0.0.0-20201119123407-9b1e624d6bc4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -727,25 +777,36 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= +google.golang.org/grpc 
v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -760,8 +821,10 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go index a1fe3a7a7f..c2442a56f9 100644 --- a/libs/autofile/autofile_test.go +++ b/libs/autofile/autofile_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - tmos "github.com/lazyledger/lazyledger-core/libs/os" ) func TestSIGHUP(t *testing.T) { @@ -27,10 +25,9 @@ func TestSIGHUP(t *testing.T) { dir, err := ioutil.TempDir("", "sighup_test") require.NoError(t, err) t.Cleanup(func() { - os.RemoveAll(dir) + _ = os.RemoveAll(dir) }) - err = os.Chdir(dir) - require.NoError(t, 
err) + require.NoError(t, os.Chdir(dir)) // Create an AutoFile in the temporary directory name := "sighup_test" @@ -45,19 +42,16 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, err) // Move the file over - err = os.Rename(name, name+"_old") - require.NoError(t, err) + require.NoError(t, os.Rename(name, name+"_old")) // Move into a different temporary directory otherDir, err := ioutil.TempDir("", "sighup_test_other") require.NoError(t, err) - defer os.RemoveAll(otherDir) - err = os.Chdir(otherDir) - require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(otherDir) }) + require.NoError(t, os.Chdir(otherDir)) // Send SIGHUP to self. - err = syscall.Kill(syscall.Getpid(), syscall.SIGHUP) - require.NoError(t, err) + require.NoError(t, syscall.Kill(syscall.Getpid(), syscall.SIGHUP)) // Wait a bit... signals are not handled synchronously. time.Sleep(time.Millisecond * 10) @@ -67,14 +61,13 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, err) _, err = af.Write([]byte("Line 4\n")) require.NoError(t, err) - err = af.Close() - require.NoError(t, err) + require.NoError(t, af.Close()) // Both files should exist - if body := tmos.MustReadFile(filepath.Join(dir, name+"_old")); string(body) != "Line 1\nLine 2\n" { + if body := mustReadFile(t, filepath.Join(dir, name+"_old")); string(body) != "Line 1\nLine 2\n" { t.Errorf("unexpected body %s", body) } - if body := tmos.MustReadFile(filepath.Join(dir, name)); string(body) != "Line 3\nLine 4\n" { + if body := mustReadFile(t, filepath.Join(dir, name)); string(body) != "Line 3\nLine 4\n" { t.Errorf("unexpected body %s", body) } @@ -115,8 +108,7 @@ func TestAutoFileSize(t *testing.T) { // First, create an AutoFile writing to a tempfile dir f, err := ioutil.TempFile("", "sighup_test") require.NoError(t, err) - err = f.Close() - require.NoError(t, err) + require.NoError(t, f.Close()) // Here is the actual AutoFile. 
af, err := OpenAutoFile(f.Name()) @@ -136,14 +128,19 @@ func TestAutoFileSize(t *testing.T) { require.NoError(t, err) // 3. Not existing file - err = af.Close() - require.NoError(t, err) - err = os.Remove(f.Name()) - require.NoError(t, err) + require.NoError(t, af.Close()) + require.NoError(t, os.Remove(f.Name())) size, err = af.Size() require.EqualValues(t, 0, size, "Expected a new file to be empty") require.NoError(t, err) // Cleanup - _ = os.Remove(f.Name()) + t.Cleanup(func() { os.Remove(f.Name()) }) +} + +func mustReadFile(t *testing.T, filePath string) []byte { + fileBytes, err := ioutil.ReadFile(filePath) + require.NoError(t, err) + + return fileBytes } diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index b64bbbe230..bfc298de96 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -2,7 +2,9 @@ package bits import ( "encoding/binary" + "errors" "fmt" + "math" "regexp" "strings" "sync" @@ -27,7 +29,7 @@ func NewBitArray(bits int) *BitArray { } return &BitArray{ Bits: bits, - Elems: make([]uint64, (bits+63)/64), + Elems: make([]uint64, numElems(bits)), } } @@ -100,7 +102,7 @@ func (bA *BitArray) copy() *BitArray { } func (bA *BitArray) copyBits(bits int) *BitArray { - c := make([]uint64, (bits+63)/64) + c := make([]uint64, numElems(bits)) copy(c, bA.Elems) return &BitArray{ Bits: bits, @@ -418,27 +420,45 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { return nil } -// ToProto converts BitArray to protobuf +// ToProto converts BitArray to protobuf. It returns nil if BitArray is +// nil/empty. +// +// XXX: It does not copy the array. func (bA *BitArray) ToProto() *tmprotobits.BitArray { - if bA == nil || len(bA.Elems) == 0 { + if bA == nil || + (len(bA.Elems) == 0 && bA.Bits == 0) { // empty return nil } - return &tmprotobits.BitArray{ - Bits: int64(bA.Bits), - Elems: bA.Elems, - } + return &tmprotobits.BitArray{Bits: int64(bA.Bits), Elems: bA.Elems} } -// FromProto sets a protobuf BitArray to the given pointer. 
-func (bA *BitArray) FromProto(protoBitArray *tmprotobits.BitArray) { +// FromProto sets BitArray to the given protoBitArray. It returns an error if +// protoBitArray is invalid. +// +// XXX: It does not copy the array. +func (bA *BitArray) FromProto(protoBitArray *tmprotobits.BitArray) error { if protoBitArray == nil { - bA = nil - return + return nil } - bA.Bits = int(protoBitArray.Bits) - if len(protoBitArray.Elems) > 0 { - bA.Elems = protoBitArray.Elems + // Validate protoBitArray. + if protoBitArray.Bits < 0 { + return errors.New("negative Bits") + } + // #[32bit] + if protoBitArray.Bits > math.MaxInt32 { // prevent overflow on 32bit systems + return errors.New("too many Bits") + } + if got, exp := len(protoBitArray.Elems), numElems(int(protoBitArray.Bits)); got != exp { + return fmt.Errorf("invalid number of Elems: got %d, but exp %d", got, exp) } + + bA.Bits = int(protoBitArray.Bits) + bA.Elems = protoBitArray.Elems + return nil +} + +func numElems(bits int) int { + return (bits + 63) / 64 } diff --git a/libs/bits/bit_array_test.go b/libs/bits/bit_array_test.go index 110fd1cfd0..4035fcca00 100644 --- a/libs/bits/bit_array_test.go +++ b/libs/bits/bit_array_test.go @@ -4,12 +4,14 @@ import ( "bytes" "encoding/json" "fmt" + "math" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" tmrand "github.com/lazyledger/lazyledger-core/libs/rand" + tmprotobits "github.com/lazyledger/lazyledger-core/proto/tendermint/libs/bits" ) func randBitArray(bits int) (*BitArray, []byte) { @@ -266,7 +268,7 @@ func TestJSONMarshalUnmarshal(t *testing.T) { } } -func TestBitArrayProtoBuf(t *testing.T) { +func TestBitArrayToFromProto(t *testing.T) { testCases := []struct { msg string bA1 *BitArray @@ -280,11 +282,41 @@ func TestBitArrayProtoBuf(t *testing.T) { for _, tc := range testCases { protoBA := tc.bA1.ToProto() ba := new(BitArray) - ba.FromProto(protoBA) + err := ba.FromProto(protoBA) if tc.expPass { + 
assert.NoError(t, err) require.Equal(t, tc.bA1, ba, tc.msg) } else { require.NotEqual(t, tc.bA1, ba, tc.msg) } } } + +func TestBitArrayFromProto(t *testing.T) { + testCases := []struct { + pbA *tmprotobits.BitArray + resA *BitArray + expErr bool + }{ + 0: {nil, &BitArray{}, false}, + 1: {&tmprotobits.BitArray{}, &BitArray{}, false}, + + 2: {&tmprotobits.BitArray{Bits: 1, Elems: make([]uint64, 1)}, &BitArray{Bits: 1, Elems: make([]uint64, 1)}, false}, + + 3: {&tmprotobits.BitArray{Bits: -1, Elems: make([]uint64, 1)}, &BitArray{}, true}, + 4: {&tmprotobits.BitArray{Bits: math.MaxInt32 + 1, Elems: make([]uint64, 1)}, &BitArray{}, true}, + 5: {&tmprotobits.BitArray{Bits: 1, Elems: make([]uint64, 2)}, &BitArray{}, true}, + } + + for i, tc := range testCases { + bA := new(BitArray) + err := bA.FromProto(tc.pbA) + if tc.expErr { + assert.Error(t, err, "#%d", i) + assert.Equal(t, tc.resA, bA, "#%d", i) + } else { + assert.NoError(t, err, "#%d", i) + assert.Equal(t, tc.resA, bA, "#%d", i) + } + } +} diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index ba9e172e68..a81a2f84b5 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -1,8 +1,10 @@ package bytes import ( + "bytes" "encoding/binary" "encoding/hex" + "encoding/json" "fmt" "strings" ) @@ -10,13 +12,10 @@ import ( // The main purpose of HexBytes is to enable HEX-encoding for json/encoding. type HexBytes []byte -func (bz HexBytes) MarshalDelimited() ([]byte, error) { - lenBuf := make([]byte, binary.MaxVarintLen64) - length := uint64(len(bz)) - n := binary.PutUvarint(lenBuf, length) - - return append(lenBuf[:n], bz...), nil -} +var ( + _ json.Marshaler = HexBytes{} + _ json.Unmarshaler = &HexBytes{} +) // Marshal needed for protobuf compatibility func (bz HexBytes) Marshal() ([]byte, error) { @@ -29,7 +28,16 @@ func (bz *HexBytes) Unmarshal(data []byte) error { return nil } -// This is the point of Bytes. 
+func (bz HexBytes) MarshalDelimited() ([]byte, error) { + lenBuf := make([]byte, binary.MaxVarintLen64) + length := uint64(len(bz)) + n := binary.PutUvarint(lenBuf, length) + + return append(lenBuf[:n], bz...), nil +} + +// MarshalJSON implements the json.Marshaler interface. The hex bytes is a +// quoted hexadecimal encoded string. func (bz HexBytes) MarshalJSON() ([]byte, error) { s := strings.ToUpper(hex.EncodeToString(bz)) jbz := make([]byte, len(s)+2) @@ -39,16 +47,23 @@ func (bz HexBytes) MarshalJSON() ([]byte, error) { return jbz, nil } -// This is the point of Bytes. +// UnmarshalJSON implements the json.Umarshaler interface. func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte("null")) { + return nil + } + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { return fmt.Errorf("invalid hex string: %s", data) } + bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) if err != nil { return err } + *bz = bz2 + return nil } diff --git a/libs/fail/fail.go b/libs/fail/fail.go index 38cec9a296..03a2ca6682 100644 --- a/libs/fail/fail.go +++ b/libs/fail/fail.go @@ -32,16 +32,9 @@ func Fail() { } if callIndex == callIndexToFail { - Exit() + fmt.Printf("*** fail-test %d ***\n", callIndex) + os.Exit(1) } callIndex++ } - -func Exit() { - fmt.Printf("*** fail-test %d ***\n", callIndex) - os.Exit(1) - // proc, _ := os.FindProcess(os.Getpid()) - // proc.Signal(os.Interrupt) - // panic(fmt.Sprintf("*** fail-test %d ***", callIndex)) -} diff --git a/libs/math/fraction.go b/libs/math/fraction.go index 399bc1c186..a8d2855924 100644 --- a/libs/math/fraction.go +++ b/libs/math/fraction.go @@ -3,18 +3,18 @@ package math import ( "errors" "fmt" + "math" "strconv" "strings" ) -// Fraction defined in terms of a numerator divided by a denominator in int64 -// format. +// Fraction defined in terms of a numerator divided by a denominator in uint64 +// format. Fraction must be positive. 
type Fraction struct { // The portion of the denominator in the faction, e.g. 2 in 2/3. - Numerator int64 `json:"numerator"` - // The value by which the numerator is divided, e.g. 3 in 2/3. Must be - // positive. - Denominator int64 `json:"denominator"` + Numerator uint64 `json:"numerator"` + // The value by which the numerator is divided, e.g. 3 in 2/3. + Denominator uint64 `json:"denominator"` } func (fr Fraction) String() string { @@ -27,16 +27,22 @@ func (fr Fraction) String() string { func ParseFraction(f string) (Fraction, error) { o := strings.Split(f, "/") if len(o) != 2 { - return Fraction{}, errors.New("incorrect formating: should be like \"1/3\"") + return Fraction{}, errors.New("incorrect formating: should have a single slash i.e. \"1/3\"") } - numerator, err := strconv.ParseInt(o[0], 10, 64) + numerator, err := strconv.ParseUint(o[0], 10, 64) if err != nil { return Fraction{}, fmt.Errorf("incorrect formatting, err: %w", err) } - denominator, err := strconv.ParseInt(o[1], 10, 64) + denominator, err := strconv.ParseUint(o[1], 10, 64) if err != nil { return Fraction{}, fmt.Errorf("incorrect formatting, err: %w", err) } + if denominator == 0 { + return Fraction{}, errors.New("denominator can't be 0") + } + if numerator > math.MaxInt64 || denominator > math.MaxInt64 { + return Fraction{}, fmt.Errorf("value overflow, numerator and denominator must be less than %d", int64(math.MaxInt64)) + } return Fraction{Numerator: numerator, Denominator: denominator}, nil } diff --git a/libs/math/fraction_test.go b/libs/math/fraction_test.go index e4cabd32d5..73ca0f6c83 100644 --- a/libs/math/fraction_test.go +++ b/libs/math/fraction_test.go @@ -23,15 +23,33 @@ func TestParseFraction(t *testing.T) { exp: Fraction{15, 5}, err: false, }, + // test divide by zero error + { + f: "2/0", + exp: Fraction{}, + err: true, + }, + // test negative { f: "-1/2", - exp: Fraction{-1, 2}, - err: false, + exp: Fraction{}, + err: true, }, { f: "1/-2", - exp: Fraction{1, -2}, - err: false, 
+ exp: Fraction{}, + err: true, + }, + // test overflow + { + f: "9223372036854775808/2", + exp: Fraction{}, + err: true, + }, + { + f: "2/9223372036854775808", + exp: Fraction{}, + err: true, }, { f: "2/3/4", diff --git a/libs/os/os.go b/libs/os/os.go index ea24a42f60..733f7e9422 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -3,7 +3,6 @@ package os import ( "fmt" "io" - "io/ioutil" "os" "os/signal" "syscall" @@ -13,31 +12,22 @@ type logger interface { Info(msg string, keyvals ...interface{}) } -// TrapSignal catches the SIGTERM/SIGINT and executes cb function. After that it exits -// with code 0. +// TrapSignal catches SIGTERM and SIGINT, executes the cleanup function, +// and exits with code 0. func TrapSignal(logger logger, cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { - for sig := range c { - logger.Info(fmt.Sprintf("captured %v, exiting...", sig)) - if cb != nil { - cb() - } - os.Exit(0) + sig := <-c + logger.Info(fmt.Sprintf("captured %v, exiting...", sig)) + if cb != nil { + cb() } + os.Exit(0) }() } -// Kill the running process by sending itself SIGTERM. 
-func Kill() error { - p, err := os.FindProcess(os.Getpid()) - if err != nil { - return err - } - return p.Signal(syscall.SIGTERM) -} - func Exit(s string) { fmt.Printf(s + "\n") os.Exit(1) @@ -58,30 +48,6 @@ func FileExists(filePath string) bool { return !os.IsNotExist(err) } -func ReadFile(filePath string) ([]byte, error) { - return ioutil.ReadFile(filePath) -} - -func MustReadFile(filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) - if err != nil { - Exit(fmt.Sprintf("MustReadFile failed: %v", err)) - return nil - } - return fileBytes -} - -func WriteFile(filePath string, contents []byte, mode os.FileMode) error { - return ioutil.WriteFile(filePath, contents, mode) -} - -func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { - err := WriteFile(filePath, contents, mode) - if err != nil { - Exit(fmt.Sprintf("MustWriteFile failed: %v", err)) - } -} - // CopyFile copies a file. It truncates the destination file if it exists. func CopyFile(src, dst string) error { info, err := os.Stat(src) diff --git a/libs/os/os_test.go b/libs/os/os_test.go index 9c80f1f5a3..60e65a34ff 100644 --- a/libs/os/os_test.go +++ b/libs/os/os_test.go @@ -1,11 +1,16 @@ -package os +package os_test import ( "bytes" "fmt" "io/ioutil" "os" + "os/exec" + "syscall" "testing" + "time" + + tmos "github.com/lazyledger/lazyledger-core/libs/os" ) func TestCopyFile(t *testing.T) { @@ -20,7 +25,7 @@ func TestCopyFile(t *testing.T) { } copyfile := fmt.Sprintf("%s.copy", tmpfile.Name()) - if err := CopyFile(tmpfile.Name(), copyfile); err != nil { + if err := tmos.CopyFile(tmpfile.Name(), copyfile); err != nil { t.Fatal(err) } if _, err := os.Stat(copyfile); os.IsNotExist(err) { @@ -35,3 +40,63 @@ func TestCopyFile(t *testing.T) { } os.Remove(copyfile) } + +func TestTrapSignal(t *testing.T) { + if os.Getenv("TM_TRAP_SIGNAL_TEST") == "1" { + t.Log("inside test process") + killer() + return + } + + cmd, _, mockStderr := newTestProgram(t, "TM_TRAP_SIGNAL_TEST") + 
+ err := cmd.Run() + if err == nil { + wantStderr := "exiting" + if mockStderr.String() != wantStderr { + t.Fatalf("stderr: want %q, got %q", wantStderr, mockStderr.String()) + } + + return + } + + if e, ok := err.(*exec.ExitError); ok && !e.Success() { + t.Fatalf("wrong exit code, want 0, got %d", e.ExitCode()) + } + + t.Fatal("this error should not be triggered") +} + +type mockLogger struct{} + +func (ml mockLogger) Info(msg string, keyvals ...interface{}) {} + +func killer() { + logger := mockLogger{} + + tmos.TrapSignal(logger, func() { _, _ = fmt.Fprintf(os.Stderr, "exiting") }) + time.Sleep(1 * time.Second) + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + panic(err) + } + + if err := p.Signal(syscall.SIGTERM); err != nil { + panic(err) + } + + time.Sleep(1 * time.Second) +} + +func newTestProgram(t *testing.T, environVar string) (cmd *exec.Cmd, stdout *bytes.Buffer, stderr *bytes.Buffer) { + t.Helper() + + cmd = exec.Command(os.Args[0], "-test.run="+t.Name()) + stdout, stderr = bytes.NewBufferString(""), bytes.NewBufferString("") + cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", environVar)) + cmd.Stdout = stdout + cmd.Stderr = stderr + + return +} diff --git a/light/client.go b/light/client.go index 49be3dac9f..921f7d824d 100644 --- a/light/client.go +++ b/light/client.go @@ -123,7 +123,7 @@ type Client struct { providerMutex tmsync.Mutex // Primary provider of new headers. primary provider.Provider - // See Witnesses option + // Providers used to "witness" new headers. witnesses []provider.Provider // Where trusted light blocks are stored. @@ -218,16 +218,8 @@ func NewClientFromTrustedStore( } // Validate the number of witnesses. - if len(c.witnesses) < 1 && c.verificationMode == skipping { - return nil, errNoWitnesses{} - } - - // Verify witnesses are all on the same chain. 
- for i, w := range witnesses { - if w.ChainID() != chainID { - return nil, fmt.Errorf("witness #%d: %v is on another chain %s, expected %s", - i, w, w.ChainID(), chainID) - } + if len(c.witnesses) < 1 { + return nil, ErrNoWitnesses } // Validate trust level. @@ -363,10 +355,8 @@ func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOp } // 3) Cross-verify with witnesses to ensure everybody has the same state. - if len(c.witnesses) > 0 { - if err := c.compareFirstHeaderWithWitnesses(ctx, l.SignedHeader); err != nil { - return err - } + if err := c.compareFirstHeaderWithWitnesses(ctx, l.SignedHeader); err != nil { + return err } // 4) Persist both of them and continue. @@ -443,7 +433,7 @@ func (c *Client) Update(ctx context.Context, now time.Time) (*types.LightBlock, } // VerifyLightBlockAtHeight fetches the light block at the given height -// and calls verifyLightBlock. It returns the block immediately if it exists in +// and verifies it. It returns the block immediately if it exists in // the trustedStore (no verification is needed). // // height must be > 0. @@ -457,7 +447,7 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now return nil, errors.New("negative or zero height") } - // Check if the light block already verified. + // Check if the light block is already verified. h, err := c.TrustedLightBlock(height) if err == nil { c.logger.Info("Header has already been verified", "height", height, "hash", hash2str(h.Hash())) @@ -600,6 +590,7 @@ func (c *Client) verifySequential( verifiedBlock = trustedBlock interimBlock *types.LightBlock err error + trace = []*types.LightBlock{trustedBlock} ) for height := trustedBlock.Height + 1; height <= newLightBlock.Height; height++ { @@ -635,7 +626,7 @@ func (c *Client) verifySequential( // If some intermediate header is invalid, replace the primary and try // again. 
- c.logger.Error("primary sent invalid header -> replacing", "err", err) + c.logger.Error("primary sent invalid header -> replacing", "err", err, "primary", c.primary) replaceErr := c.replacePrimaryProvider() if replaceErr != nil { c.logger.Error("Can't replace primary", "err", replaceErr) @@ -669,9 +660,17 @@ func (c *Client) verifySequential( // 3) Update verifiedBlock verifiedBlock = interimBlock + + // 4) Add verifiedBlock to trace + trace = append(trace, verifiedBlock) } - return nil + // Compare header with the witnesses to ensure it's not a fork. + // More witnesses we have, more chance to notice one. + // + // CORRECTNESS ASSUMPTION: there's at least 1 correct full node + // (primary or one of the witnesses). + return c.detectDivergence(ctx, trace, now) } // see VerifyHeader @@ -761,7 +760,7 @@ func (c *Client) verifySkippingAgainstPrimary( // If some intermediate header is invalid, replace the primary and try // again. - c.logger.Error("primary sent invalid header -> replacing", "err", err) + c.logger.Error("primary sent invalid header -> replacing", "err", err, "primary", c.primary) replaceErr := c.replacePrimaryProvider() if replaceErr != nil { c.logger.Error("Can't replace primary", "err", replaceErr) @@ -846,7 +845,7 @@ func (c *Client) Witnesses() []provider.Provider { // Cleanup removes all the data (headers and validator sets) stored. Note: the // client must be stopped at this point. 
func (c *Client) Cleanup() error { - c.logger.Info("Removing all the data") + c.logger.Info("Removing all light blocks") c.latestTrustedBlock = nil return c.trustedStore.Prune(0) } @@ -925,7 +924,7 @@ func (c *Client) backwards( "newHeight", interimHeader.Height, "newHash", hash2str(interimHeader.Hash())) if err := VerifyBackwards(interimHeader, verifiedHeader); err != nil { - c.logger.Error("primary sent invalid header -> replacing", "err", err) + c.logger.Error("primary sent invalid header -> replacing", "err", err, "primary", c.primary) if replaceErr := c.replacePrimaryProvider(); replaceErr != nil { c.logger.Error("Can't replace primary", "err", replaceErr) // return original error @@ -961,7 +960,7 @@ func (c *Client) replacePrimaryProvider() error { defer c.providerMutex.Unlock() if len(c.witnesses) <= 1 { - return errNoWitnesses{} + return ErrNoWitnesses } c.primary = c.witnesses[0] c.witnesses = c.witnesses[1:] @@ -978,7 +977,7 @@ func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*type l, err := c.primary.LightBlock(ctx, height) c.providerMutex.Unlock() if err != nil { - c.logger.Debug("Error on light block request from primary", "error", err) + c.logger.Debug("Error on light block request from primary", "error", err, "primary", c.primary) replaceErr := c.replacePrimaryProvider() if replaceErr != nil { return nil, fmt.Errorf("%v. 
Tried to replace primary but: %w", err.Error(), replaceErr) @@ -995,6 +994,10 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S compareCtx, cancel := context.WithCancel(ctx) defer cancel() + if len(c.witnesses) < 1 { + return ErrNoWitnesses + } + errc := make(chan error, len(c.witnesses)) for i, witness := range c.witnesses { go c.compareNewHeaderWithWitness(compareCtx, errc, h, witness, i) diff --git a/light/detector.go b/light/detector.go index 7b0847089e..e6aca3c7f6 100644 --- a/light/detector.go +++ b/light/detector.go @@ -15,8 +15,7 @@ import ( // More info here: // tendermint/docs/architecture/adr-047-handling-evidence-from-light-client.md -// detectDivergence is a second wall of defense for the light client and is used -// only in the case of skipping verification which employs the trust level mechanism. +// detectDivergence is a second wall of defense for the light client. // // It takes the target verified header and compares it with the headers of a set of // witness providers that the light client is connected to. If a conflicting header @@ -42,7 +41,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig defer c.providerMutex.Unlock() if len(c.witnesses) == 0 { - return errNoWitnesses{} + return ErrNoWitnesses } // launch one goroutine per witness to retrieve the light block of the target height @@ -79,21 +78,10 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) continue } - // if this is an equivocation or amnesia attack, i.e. the validator sets are the same, then we - // return the height of the conflicting block else if it is a lunatic attack and the validator sets - // are not the same then we send the height of the common header. 
- commonHeight := primaryBlock.Height - if isInvalidHeader(witnessTrace[len(witnessTrace)-1].Header, primaryBlock.Header) { - // height of the common header - commonHeight = witnessTrace[0].Height - } // We are suspecting that the primary is faulty, hence we hold the witness as the source of truth // and generate evidence against the primary that we can send to the witness - primaryEv := &types.LightClientAttackEvidence{ - ConflictingBlock: primaryBlock, - CommonHeight: commonHeight, // the first block in the bisection is common to both providers - } + primaryEv := newLightClientAttackEvidence(primaryBlock, witnessTrace[len(witnessTrace)-1], witnessTrace[0]) c.logger.Error("Attempted attack detected. Sending evidence againt primary by witness", "ev", primaryEv, "primary", c.primary, "witness", supportingWitness) c.sendEvidence(ctx, primaryEv, supportingWitness) @@ -118,20 +106,9 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig c.logger.Info("Error validating primary's divergent header", "primary", c.primary, "err", err) return ErrLightClientAttack } - // if this is an equivocation or amnesia attack, i.e. the validator sets are the same, then we - // return the height of the conflicting block else if it is a lunatic attack and the validator sets - // are not the same then we send the height of the common header. 
- commonHeight = primaryBlock.Height - if isInvalidHeader(primaryTrace[len(primaryTrace)-1].Header, witnessBlock.Header) { - // height of the common header - commonHeight = primaryTrace[0].Height - } // We now use the primary trace to create evidence against the witness and send it to the primary - witnessEv := &types.LightClientAttackEvidence{ - ConflictingBlock: witnessBlock, - CommonHeight: commonHeight, // the first block in the bisection is common to both providers - } + witnessEv := newLightClientAttackEvidence(witnessBlock, primaryTrace[len(primaryTrace)-1], primaryTrace[0]) c.logger.Error("Sending evidence against witness by primary", "ev", witnessEv, "primary", c.primary, "witness", supportingWitness) c.sendEvidence(ctx, witnessEv, c.primary) @@ -252,14 +229,22 @@ func (c *Client) examineConflictingHeaderAgainstTrace( } -// isInvalidHeader takes a trusted header and matches it againt a conflicting header -// to determine whether the conflicting header was the product of a valid state transition -// or not. If it is then all the deterministic fields of the header should be the same. -// If not, it is an invalid header and constitutes a lunatic attack. -func isInvalidHeader(trusted, conflicting *types.Header) bool { - return !bytes.Equal(trusted.ValidatorsHash, conflicting.ValidatorsHash) || - !bytes.Equal(trusted.NextValidatorsHash, conflicting.NextValidatorsHash) || - !bytes.Equal(trusted.ConsensusHash, conflicting.ConsensusHash) || - !bytes.Equal(trusted.AppHash, conflicting.AppHash) || - !bytes.Equal(trusted.LastResultsHash, conflicting.LastResultsHash) +// newLightClientAttackEvidence determines the type of attack and then forms the evidence filling out +// all the fields such that it is ready to be sent to a full node. 
+func newLightClientAttackEvidence(conflicted, trusted, common *types.LightBlock) *types.LightClientAttackEvidence { + ev := &types.LightClientAttackEvidence{ConflictingBlock: conflicted} + // if this is an equivocation or amnesia attack, i.e. the validator sets are the same, then we + // return the height of the conflicting block else if it is a lunatic attack and the validator sets + // are not the same then we send the height of the common header. + if ev.ConflictingHeaderIsInvalid(trusted.Header) { + ev.CommonHeight = common.Height + ev.Timestamp = common.Time + ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower() + } else { + ev.CommonHeight = trusted.Height + ev.Timestamp = trusted.Time + ev.TotalVotingPower = trusted.ValidatorSet.TotalVotingPower() + } + ev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader) + return ev } diff --git a/light/detector_test.go b/light/detector_test.go index b84a527ed0..f19ea4d567 100644 --- a/light/detector_test.go +++ b/light/detector_test.go @@ -63,7 +63,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { // Check verification returns an error. _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) if assert.Error(t, err) { - assert.Equal(t, err, light.ErrLightClientAttack) + assert.Equal(t, light.ErrLightClientAttack, err) } // Check evidence was sent to both full nodes. 
@@ -90,76 +90,86 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { } func TestLightClientAttackEvidence_Equivocation(t *testing.T) { - // primary performs an equivocation attack - var ( - latestHeight = int64(10) - valSize = 5 - divergenceHeight = int64(6) - primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight) - primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) - ) - // validators don't change in this network (however we still use a map just for convenience) - witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight+2, valSize, 2, bTime) - witness := mockp.New(chainID, witnessHeaders, witnessValidators) + verificationOptions := map[string]light.Option{ + "sequential": light.SequentialVerification(), + "skipping": light.SkippingVerification(light.DefaultTrustLevel), + } - for height := int64(1); height <= latestHeight; height++ { - if height < divergenceHeight { - primaryHeaders[height] = witnessHeaders[height] + for s, verificationOption := range verificationOptions { + t.Log("==> verification", s) + + // primary performs an equivocation attack + var ( + latestHeight = int64(10) + valSize = 5 + divergenceHeight = int64(6) + primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight) + primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) + ) + // validators don't change in this network (however we still use a map just for convenience) + witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight+2, valSize, 2, bTime) + witness := mockp.New(chainID, witnessHeaders, witnessValidators) + + for height := int64(1); height <= latestHeight; height++ { + if height < divergenceHeight { + primaryHeaders[height] = witnessHeaders[height] + primaryValidators[height] = witnessValidators[height] + continue + } + // we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for + // a different block (which we do 
by adding txs) + primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height, + bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")}, + witnessValidators[height], witnessValidators[height+1], hash("app_hash"), + hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1) primaryValidators[height] = witnessValidators[height] - continue } - // we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for - // a different block (which we do by adding txs) - primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height, - bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")}, - witnessValidators[height], witnessValidators[height+1], hash("app_hash"), - hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1) - primaryValidators[height] = witnessValidators[height] - } - primary := mockp.New(chainID, primaryHeaders, primaryValidators) + primary := mockp.New(chainID, primaryHeaders, primaryValidators) - c, err := light.NewClient( - ctx, - chainID, - light.TrustOptions{ - Period: 4 * time.Hour, - Height: 1, - Hash: primaryHeaders[1].Hash(), - }, - primary, - []provider.Provider{witness}, - dbs.New(dbm.NewMemDB(), chainID), - light.Logger(log.TestingLogger()), - light.MaxRetryAttempts(1), - ) - require.NoError(t, err) + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: 1, + Hash: primaryHeaders[1].Hash(), + }, + primary, + []provider.Provider{witness}, + dbs.New(dbm.NewMemDB(), chainID), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), + verificationOption, + ) + require.NoError(t, err) - // Check verification returns an error. - _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) - if assert.Error(t, err) { - assert.Equal(t, err, light.ErrLightClientAttack) - } + // Check verification returns an error. 
+ _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + if assert.Error(t, err) { + assert.Equal(t, light.ErrLightClientAttack, err) + } - // Check evidence was sent to both full nodes. - // Common height should be set to the height of the divergent header in the instance - // of an equivocation attack and the validator sets are the same as what the witness has - evAgainstPrimary := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: primaryHeaders[divergenceHeight], - ValidatorSet: primaryValidators[divergenceHeight], - }, - CommonHeight: divergenceHeight, - } - assert.True(t, witness.HasEvidence(evAgainstPrimary)) + // Check evidence was sent to both full nodes. + // Common height should be set to the height of the divergent header in the instance + // of an equivocation attack and the validator sets are the same as what the witness has + evAgainstPrimary := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: primaryHeaders[divergenceHeight], + ValidatorSet: primaryValidators[divergenceHeight], + }, + CommonHeight: divergenceHeight, + } + assert.True(t, witness.HasEvidence(evAgainstPrimary)) - evAgainstWitness := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: witnessHeaders[divergenceHeight], - ValidatorSet: witnessValidators[divergenceHeight], - }, - CommonHeight: divergenceHeight, + evAgainstWitness := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: witnessHeaders[divergenceHeight], + ValidatorSet: witnessValidators[divergenceHeight], + }, + CommonHeight: divergenceHeight, + } + assert.True(t, primary.HasEvidence(evAgainstWitness)) } - assert.True(t, primary.HasEvidence(evAgainstWitness)) } // 1. Different nodes therefore a divergent header is produced. 
diff --git a/light/errors.go b/light/errors.go index 54d4de88ee..a1cbaaea83 100644 --- a/light/errors.go +++ b/light/errors.go @@ -42,9 +42,10 @@ func (e ErrInvalidHeader) Error() string { // ErrFailedHeaderCrossReferencing is returned when the detector was not able to cross reference the header // with any of the connected witnesses. -var ErrFailedHeaderCrossReferencing = errors.New("all witnesses have either not responded, don't have the " + - " blocks or sent invalid blocks. You should look to change your witnesses" + - " or review the light client's logs for more information") +var ErrFailedHeaderCrossReferencing = errors.New( + `all witnesses have either not responded, don't have the blocks or sent invalid blocks. + You should look to change your witnesses or review the light client's logs for more information`, +) // ErrVerificationFailed means either sequential or skipping verification has // failed to verify from header #1 to header #2 due to some reason. @@ -60,17 +61,20 @@ func (e ErrVerificationFailed) Unwrap() error { } func (e ErrVerificationFailed) Error() string { - return fmt.Sprintf( - "verify from #%d to #%d failed: %v", - e.From, e.To, e.Reason) + return fmt.Sprintf("verify from #%d to #%d failed: %v", e.From, e.To, e.Reason) } // ErrLightClientAttack is returned when the light client has detected an attempt // to verify a false header and has sent the evidence to either a witness or primary. -var ErrLightClientAttack = errors.New("attempted attack detected." + - " Light client received valid conflicting header from witness." + - " Unable to verify header. Evidence has been sent to both providers." + - " Check logs for full evidence and trace") +var ErrLightClientAttack = errors.New(`attempted attack detected. + Light client received valid conflicting header from witness. + Unable to verify header. Evidence has been sent to both providers. 
+ Check logs for full evidence and trace`, +) + +// ErrNoWitnesses means that there are not enough witnesses connected to +// continue running the light client. +var ErrNoWitnesses = errors.New("no witnesses connected. please reset light client") // ----------------------------- INTERNAL ERRORS --------------------------------- @@ -86,14 +90,6 @@ func (e errConflictingHeaders) Error() string { e.Block.Hash(), e.WitnessIndex) } -// errNoWitnesses means that there are not enough witnesses connected to -// continue running the light client. -type errNoWitnesses struct{} - -func (e errNoWitnesses) Error() string { - return "no witnesses connected. please reset light client" -} - // errBadWitness is returned when the witness either does not respond or // responds with an invalid header. type errBadWitness struct { diff --git a/light/mbt/doc.go b/light/mbt/doc.go new file mode 100644 index 0000000000..fdbea479af --- /dev/null +++ b/light/mbt/doc.go @@ -0,0 +1,20 @@ +// Package mbt provides a test runner for model-based tests +// +// Model-based tests are generated by +// https://github.com/informalsystems/tendermint-rs/tree/master/testgen, which +// first turns TLA+ specifications into test scenarios. Those test scenarios +// are then in turn used to generate actual fixtures representing light blocks. +// +// The test runner initializes the light client with a trusted light block. For +// each next light block, it tries to verify the block and asserts the outcome +// ("verdict" field in .json files). +// +// In the first version (v1), JSON files are directly added to the repo. In +// the future (v2), they will be generated by the testgen binary right before +// testing on CI (the number of files will be around thousands). +// +// NOTE (v1): If a breaking change is introduced into the SignedHeader or +// ValidatorSet, you will need to regenerate the JSON files using testgen +// binary (may also require modifying tendermint-rs, e.g. 
+// https://github.com/informalsystems/tendermint-rs/pull/647) +package mbt diff --git a/light/mbt/driver_test.go b/light/mbt/driver_test.go new file mode 100644 index 0000000000..2ff1de4b3b --- /dev/null +++ b/light/mbt/driver_test.go @@ -0,0 +1,122 @@ +package mbt + +import ( + "io/ioutil" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + + tmjson "github.com/lazyledger/lazyledger-core/libs/json" + "github.com/lazyledger/lazyledger-core/light" + "github.com/lazyledger/lazyledger-core/types" +) + +const jsonDir = "./json" + +func TestVerify(t *testing.T) { + filenames := jsonFilenames(t) + + for _, filename := range filenames { + filename := filename + t.Run(filename, func(t *testing.T) { + + jsonBlob, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var tc testCase + err = tmjson.Unmarshal(jsonBlob, &tc) + if err != nil { + t.Fatal(err) + } + + t.Log(tc.Description) + + var ( + trustedSignedHeader = tc.Initial.SignedHeader + trustedNextVals = tc.Initial.NextValidatorSet + trustingPeriod = time.Duration(tc.Initial.TrustingPeriod) * time.Nanosecond + ) + + for _, input := range tc.Input { + var ( + newSignedHeader = input.LightBlock.SignedHeader + newVals = input.LightBlock.ValidatorSet + ) + + err = light.Verify( + &trustedSignedHeader, + &trustedNextVals, + newSignedHeader, + newVals, + trustingPeriod, + input.Now, + 1*time.Second, + light.DefaultTrustLevel, + ) + + t.Logf("%d -> %d", trustedSignedHeader.Height, newSignedHeader.Height) + + switch input.Verdict { + case "SUCCESS": + require.NoError(t, err) + case "NOT_ENOUGH_TRUST": + require.IsType(t, light.ErrNewValSetCantBeTrusted{}, err) + case "INVALID": + switch err.(type) { + case light.ErrOldHeaderExpired: + case light.ErrInvalidHeader: + default: + t.Fatalf("expected either ErrInvalidHeader or ErrOldHeaderExpired, but got %v", err) + } + default: + t.Fatalf("unexpected verdict: %q", input.Verdict) + } + + if 
err == nil { // advance + trustedSignedHeader = *newSignedHeader + trustedNextVals = *input.LightBlock.NextValidatorSet + } + } + }) + } +} + +// jsonFilenames returns a list of files in jsonDir directory +func jsonFilenames(t *testing.T) []string { + matches, err := filepath.Glob(filepath.Join(jsonDir, "*.json")) + if err != nil { + t.Fatal(err) + } + return matches +} + +type testCase struct { + Description string `json:"description"` + Initial initialData `json:"initial"` + Input []inputData `json:"input"` +} + +type initialData struct { + SignedHeader types.SignedHeader `json:"signed_header"` + NextValidatorSet types.ValidatorSet `json:"next_validator_set"` + TrustingPeriod uint64 `json:"trusting_period"` + Now time.Time `json:"now"` +} + +type inputData struct { + LightBlock lightBlockWithNextValidatorSet `json:"block"` + Now time.Time `json:"now"` + Verdict string `json:"verdict"` +} + +// In tendermint-rs, NextValidatorSet is used to verify new blocks (opposite to +// Go tendermint). 
+type lightBlockWithNextValidatorSet struct { + *types.SignedHeader `json:"signed_header"` + ValidatorSet *types.ValidatorSet `json:"validator_set"` + NextValidatorSet *types.ValidatorSet `json:"next_validator_set"` +} diff --git a/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustFailure.json b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustFailure.json new file mode 100644 index 0000000000..2c55794854 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustFailure.json @@ -0,0 +1,305 @@ +{ + "description": "MC4_4_faulty_Test2NotEnoughTrustFailure.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "0D038B1BA2ED7B1EF4D4E250C54D3F8D7186068658FAA53900CA83F4280B1EF2", + "part_set_header": { + "total": 1, + "hash": "0D038B1BA2ED7B1EF4D4E250C54D3F8D7186068658FAA53900CA83F4280B1EF2" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "XJC+kaVazdli/oMNHnFQOujOJLxFnez2DAUv5Uy+wPGeypkinrk2c79ZmlB5YHBTJaLh6yotq1XiLzy3zUAJAQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "pj86O2mwAQcn/MggMVEK1F6yhqnaMcxqxKyZ9DgIfFVqJIgQLb5SsuqyxPcMxxRhDTjjqfkATRGIiHPEthrFCQ==" + 
}, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "QssWTiluThPYflhI3bBuoeIBXlMR39I+vJb7EvLf6FVyxp0Ih7kW26wkmqjgHf0RyDAu9sny3FBrc/WbPXhFDQ==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "9xg3G66gizJBzWybdYKRtyg8c52U6vKmUT9TKb5MQ5MP/6IVCbhnvUjzw4Oe5stsnHMGvsx6Q7IVS3Ma7CbBDA==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:45:28.160326992Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "734FC4AE3FEEAD34654D611A867E3A4F2F921DD2B8F27289EFC52C90EFC2B8D8", + "part_set_header": { + "total": 1, + "hash": "734FC4AE3FEEAD34654D611A867E3A4F2F921DD2B8F27289EFC52C90EFC2B8D8" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:04Z", + "signature": 
"x7RNTkbf71fnTEyl7G6i8U5gi33nWZLha1nbZJjsIsbm7CCxcfsgU4uTWaHrZXCo1Ywok9zXgt0gaGOt7uR+BA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:18Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "E60D3DC5A38CE0773BF911BE62514F5FE6C12FA574F0571965E8EDE2D8899C01", + "part_set_header": { + "total": 1, + "hash": "E60D3DC5A38CE0773BF911BE62514F5FE6C12FA574F0571965E8EDE2D8899C01" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "L5MQUXKrrRk9I/wnx3Pai49qFdzSkkYRzM9eO7gOI5ofG2LaJoDMttkCKp2kp9/3koSWssnX+/Uuvy62XU/hCA==" + } + 
] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:18Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:02Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "2EA87BC69EB6739C5A1E06BCA5E7C9B8A5C163EB1ECF01EDD1A4A9B167C313C5", + "part_set_header": { + "total": 1, + "hash": "2EA87BC69EB6739C5A1E06BCA5E7C9B8A5C163EB1ECF01EDD1A4A9B167C313C5" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "bSYYr6R4pu+tcq8ji6Jnnf5EkMPcCImyROgN16KNQxzvw82fLVQ2C+E3Ry9vEV86G0fQBaxL6SFd8xers7zzDw==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + 
"timestamp": "1970-01-01T00:00:02Z", + "signature": "jlxTNsZ8h1uyVjWndZrvBAZpAonQhfSoC/MZSwWb0tIgpJ4/YlqUQZoRnr+QsV5btJfpDeknFD++5LAjUcsrDg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:22Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustSuccess.json b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustSuccess.json new file mode 100644 index 0000000000..1aab886f3e --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustSuccess.json @@ -0,0 +1,462 @@ +{ + "description": "MC4_4_faulty_Test2NotEnoughTrustSuccess.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": 
"6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D", + "part_set_header": { + "total": 1, + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "8rGIxi7DjBLFlHUo/lAgTpmzsnTZ8HOgnQaIoe+HEM5AmrjBaVDWVMb5/nNAnJTj4hcReCh4jviXcyRkItFJCA==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "3cXnzhzJLKeF47ulcIWjgqsv9JBf9olbAo0mcjo7Ij6TfmCpJO6SmTiacBkiznsFSOc1ZSH+cHDBKA4AT7ozAg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "4O8c5hxoHR861ldolxeY9W1iXCdxYJVIf0xD3+sANSxo0ipXayv8IS7YFw1zzZvDbjRRazVzbfyBYf2jl4JeDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "2Hel7uygQXpjYRJZiwtPLKNxT2Tg1/F5Zzs3VZpleFII9H1e5Gs02UjU0lybSXBKk/tD+NXPsdchrH/6/DmwAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", 
+ "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:45:11.160326991Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785", + "part_set_header": { + "total": 1, + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "hiszzyt898e+HMgChDiyWNjWpbLMQ1Kfcb1Mm8KgZM4DYdvJT79fHy/N7W08y6/9DquZKlZz6hM1GTBfrZ6ODg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + 
"voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "747249C8038E41C91EB3B737BAC2245F5F41B1527ABB7486C02CDF69C6B0DB53", + "part_set_header": { + "total": 1, + "hash": "747249C8038E41C91EB3B737BAC2245F5F41B1527ABB7486C02CDF69C6B0DB53" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "0sxZvRkF35OZ/ALf6xufgcP9QEeqd7mhXBD7nZ36CTSbYeeBVtEDspyz/M64UQ9PyADWkG9VtbB7zZhWEArOAg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": 
"0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "8F5D783FEDA6E53A6333DAB6324D567395D9189B4BBB51E3A9F2F360B667E928", + "part_set_header": { + "total": 1, + "hash": "8F5D783FEDA6E53A6333DAB6324D567395D9189B4BBB51E3A9F2F360B667E928" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "5uF6x606UvPT7JLmjEUZE6yoA5uaQU1HTi3cUgTNAeNwExwvwPsj2ERy5qxBYEzQP587g2NPDrylzHagFVmJDQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "PDSL3wHNLYafgBDZ04JTHUjtQPK4LbT7FpglwYAXlfD1K51Soq4L4QUsiHqUfpp7+gykLJzluYhNQcWDLju4Dg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:03Z", + "signature": 
"Toe2ayrfxX2g/eMST8ggDIKp127ZAKUWgvw0F716mfg7jTJA6WGtDzPzPueLkBUbIyqQvcjWuuoR5FV4WnMBCQ==" + }, + { + "block_id_flag": 1, + "validator_address": null, + "timestamp": null, + "signature": null + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": 
"2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785", + "part_set_header": { + "total": 1, + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "hiszzyt898e+HMgChDiyWNjWpbLMQ1Kfcb1Mm8KgZM4DYdvJT79fHy/N7W08y6/9DquZKlZz6hM1GTBfrZ6ODg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustFailure.json b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustFailure.json new file mode 100644 index 0000000000..1ac9a7b2a1 --- /dev/null +++ 
b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustFailure.json @@ -0,0 +1,538 @@ +{ + "description": "MC4_4_faulty_Test3NotEnoughTrustFailure.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "F7DC6F348F04E01EC7DEA4348A3BFA2F0D7533900986EA66F6006C70BDD52D2E", + "part_set_header": { + "total": 1, + "hash": "F7DC6F348F04E01EC7DEA4348A3BFA2F0D7533900986EA66F6006C70BDD52D2E" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "S5wM4flAsMJ7uGSGduppmUqDeFZBUBFKkp+LTy249+AgM3oup9ULs7eUzNiwjhV4gWnPnLJ91m6IZ3s047xzAg==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "ZLOGEO5mgrVoTpFA5DLMLX0ggBWnWLWmMF5tAorZC732T+oR2u2USAvGhkZtpM73WN3NUp04aVHInGMsYtz9Dg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "Lwa9l7+dJci4+mXD9ZsvLnbX0TuzWYIjfj9vU51rAftFRGEig7DHToufWaMfjwGMN53WrG72YfHAXxBigWaBBg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": 
"SSHBm3HdeyC1fgPqjTp647mRGxaCKA/GGraM0UFcuXv3mUjfjowL8CNjthJHgXIQCmYdF0HDwLZb1SCvWFe0Aw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:46:51.160327001Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8", + "part_set_header": { + "total": 1, + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": 
"81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:07Z", + "signature": "lDmtsNALIr3ZysmMkrYW5jPufVGQcR7U2rpGFwJfFeTQSohqm9yVjzLVeGsPZFjdmGUltxwi7nH63iIIjl7VCg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0", + "part_set_header": { + "total": 1, + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:07Z", + "signature": 
"KZ0VUajBcnvw1Lp7DnYFGTPt6sstretUcfMY9nkszfQtvcJ1x4sFvJ/D0LWkpsNVMtNSWYobw+gfETQLVbmAAQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "E98C8412BF8736722EEBFF209C5D0AB9F82B599344D043139B4D4747E1FF21EE", + "part_set_header": { + "total": 1, + "hash": "E98C8412BF8736722EEBFF209C5D0AB9F82B599344D043139B4D4747E1FF21EE" + } + }, + "signatures": [ + { + 
"block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "V2LEkvNw6vwCh5t/eTqOE0QMnRveeNV6nS9bqAD8S/dDtVnzUTwfwEgEHPwPFJDkszVkZ/9pqoKTInoO2bsHAg==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "AyDrm3XpFjB1OWJdYegH3dYp+Q9ZXV/kAstddVzpvU4pL187Tad2bNMqcgoroTiwaCWC7jtOrHd4l8Tq5myjDA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "cyQzNOgKd1OKNQJChG/E0pk9+fZ4p8bIpAqD5oZy0xT+e1DywIVUVDx0LBqbfm38C4djq3klKMvTUwTcDypCDQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { 
+ "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8", + "part_set_header": { + "total": 1, + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:07Z", + "signature": "lDmtsNALIr3ZysmMkrYW5jPufVGQcR7U2rpGFwJfFeTQSohqm9yVjzLVeGsPZFjdmGUltxwi7nH63iIIjl7VCg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": 
"Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0", + "part_set_header": { + "total": 1, + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:07Z", + "signature": "KZ0VUajBcnvw1Lp7DnYFGTPt6sstretUcfMY9nkszfQtvcJ1x4sFvJ/D0LWkpsNVMtNSWYobw+gfETQLVbmAAQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + 
"proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustSuccess.json b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustSuccess.json new file mode 100644 index 0000000000..e4c5a864d2 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustSuccess.json @@ -0,0 +1,662 @@ +{ + "description": "MC4_4_faulty_Test3NotEnoughTrustSuccess.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5", + "part_set_header": { + "total": 1, + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5" + } + }, + "signatures": [ + { + 
"block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "q0CS2J0SFpdIVuqaHEmdp8epPcZli61bfVkdA720J+TzJ06ahepHUry6P/ZD+ex6GuQcSjBP6mfzp0ksjqf3BQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "jKDmxZTfFv5xlj3byRSxV8gMDQUirQE4O8hPKvp9EvmIWwCX1S7D/qQo+GhCvfiF3QPdQ3kRCpdvwrTuq+6RBA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "AL2jwdkeW/o9DjLU3vfcqKG9QCqnhKxdPN4i/miR6FIA87v4Y45jFvZw8Ue6hhwkGKs3d1QghJXVlRJFg8VXDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "gV5npKv90ghI2wj2MP06qkVyWTbjBwBzdQnBS3ssggEE+is/BRMQQfKEKpmTAF0KIS+eZj7jmj8b+isxC3QfDw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:46:06.160326996Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": 
"0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3", + "part_set_header": { + "total": 1, + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "/GSuwjyWvPmjmcCtrg+00QmcjerrnGZueyLJvAJxhJ5gumkVvCvXB05HDoHL0D523nJHR9hMBOFMA+7cywRoCA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "wrXQ0BgJMF8EV+CWmmGersvCF9RI6/qbhBPxgAcLixV65N8RiWGba+sCfr9UHjHAEYsCsyFgQR2OLC7Bg1PKBA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": 
"Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23", + "part_set_header": { + "total": 1, + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "e/kNbh2aUmnowrri9eWLo9Wf1ZuPS1cobu+ITfz0uFn8LZcQtrQXkB7sfRrTDfRGvOkm3CpWnxD+UeQTxa12CQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + 
"validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:07Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:02Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "FE0A34650DA8A9402EA231A4D03FD1F39E0D7F894456D7268A582244FB968605", + "part_set_header": { + "total": 1, + "hash": "FE0A34650DA8A9402EA231A4D03FD1F39E0D7F894456D7268A582244FB968605" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "aVeQWMH20B1IGFIwH50HDv3qrDsvbuCuco918Spc/nHc06YJ9LYLSvo8gd7g4EoCY71eRLwPLOoHXk8Nas+XAw==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": 
"3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3", + "part_set_header": { + "total": 1, + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "/GSuwjyWvPmjmcCtrg+00QmcjerrnGZueyLJvAJxhJ5gumkVvCvXB05HDoHL0D523nJHR9hMBOFMA+7cywRoCA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "wrXQ0BgJMF8EV+CWmmGersvCF9RI6/qbhBPxgAcLixV65N8RiWGba+sCfr9UHjHAEYsCsyFgQR2OLC7Bg1PKBA==" + } + ] + } + }, + "validator_set": { + 
"validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": 
"2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23", + "part_set_header": { + "total": 1, + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "e/kNbh2aUmnowrri9eWLo9Wf1ZuPS1cobu+ITfz0uFn8LZcQtrQXkB7sfRrTDfRGvOkm3CpWnxD+UeQTxa12CQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + 
"data_hash": null, + "validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3", + "part_set_header": { + "total": 1, + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "/GSuwjyWvPmjmcCtrg+00QmcjerrnGZueyLJvAJxhJ5gumkVvCvXB05HDoHL0D523nJHR9hMBOFMA+7cywRoCA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "wrXQ0BgJMF8EV+CWmmGersvCF9RI6/qbhBPxgAcLixV65N8RiWGba+sCfr9UHjHAEYsCsyFgQR2OLC7Bg1PKBA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + 
}, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestFailure.json b/light/mbt/json/MC4_4_faulty_TestFailure.json new file mode 100644 index 0000000000..a63b5f1b6d --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestFailure.json @@ -0,0 +1,347 @@ +{ + "description": "MC4_4_faulty_TestFailure.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "658DEEC010B33EDB1977FA7B38087A8C547D65272F6A63854959E517AAD20597", + "part_set_header": { + "total": 1, + "hash": 
"658DEEC010B33EDB1977FA7B38087A8C547D65272F6A63854959E517AAD20597" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "gUvww0D+bCNnq0wY4GvDkWAUQO3kbi9YvmoRBAC3goRZ6mW8Fh6V9hrMQYbpRpf7LZqFAdnleFgXnnEuKz17Bg==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "54nTri+VJoBu8HCTb+c92aYrPiMSM71qVDkdRtwmE40LWPUFkTJNTqTLXbBXutQ1p5s6PyuB+p4UfWAwYCuUCQ==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "PWesm77j/+sQh1p00pDJv3R3B9tpe1HlfhaTS2be/5FZfq3EMH3ceplTSNGsQKo0p4f8N9UUq+TYwm+3dsZeBg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "ngAHu3FpNX6aW4B7xmFd7ckNScOM+lfuCQuMDs7uq20UoNnnGasFOcFMXD+0dQnRndEu1RItr+0kgxKaD6OtAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:44:52.160326989Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F6AF3B9193F2672E2E3830EC49F0D7E527291DEDA4326EDB7A6FB812BE8F3251", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "F6AF3B9193F2672E2E3830EC49F0D7E527291DEDA4326EDB7A6FB812BE8F3251", + 
"app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "32DD1A7D7E5C8106E14255B40F029DC568E3326512B50F45012580CD6683B9E6", + "part_set_header": { + "total": 1, + "hash": "32DD1A7D7E5C8106E14255B40F029DC568E3326512B50F45012580CD6683B9E6" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "RL9tPx8XS753xu4ziuoICsAVRmqhu34gx3NN0gsNGQw+HvECVb77g9pvcapRPDkkVf89be6dAIy/WjrsfATGDg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "kqxDTznWv65+GJA08AV4JTMBeKzDaG7jAhMA7P4YgFkM2KDKw2vOBw0R4LnLkzZQWJUkbzXeYRHcVoJlT35JAg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "aWEOTgdl9m5vBKDSCrUloM/2AfUp+SNDqbpJFEuhBv0DYmeRJDCEoeQnGACjaZHjW4LjaxgNnTOSBVFlaP/vAg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": 
"0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:16Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "A14AED7ED200C7F85F89C9A43029E0CE88691532193E198E3F45AA3375AE8D01", + "part_set_header": { + "total": 1, + "hash": "A14AED7ED200C7F85F89C9A43029E0CE88691532193E198E3F45AA3375AE8D01" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "42tNU0fwo3UKq2toY2p39ykL6ZhWrCIoGjzE5O0mmvn92SZHAg1OUGmn4c5bUF6H2kNKZXCn6Zp6T/UxhlEOBQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", 
+ "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:16Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "F6AF3B9193F2672E2E3830EC49F0D7E527291DEDA4326EDB7A6FB812BE8F3251", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "AFABB1F6927F1D7845EA474BCF523AF948644C7B1301CBC17B8A264903B9AD16", + "part_set_header": { + "total": 1, + "hash": "AFABB1F6927F1D7845EA474BCF523AF948644C7B1301CBC17B8A264903B9AD16" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "If14ddLKYwosISJdPovBpU2K1+R91ZqDY/JAyuPsGXCXm70ZyciRQBoGEOVVzAs3s3hfc+OZAScGtpK8meyxDw==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + 
"proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:24Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestHeaderFromFuture.json b/light/mbt/json/MC4_4_faulty_TestHeaderFromFuture.json new file mode 100644 index 0000000000..856e0676e3 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestHeaderFromFuture.json @@ -0,0 +1,162 @@ +{ + "description": "MC4_4_faulty_TestHeaderFromFuture.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5", + "part_set_header": { + "total": 1, + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": 
"0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "q0CS2J0SFpdIVuqaHEmdp8epPcZli61bfVkdA720J+TzJ06ahepHUry6P/ZD+ex6GuQcSjBP6mfzp0ksjqf3BQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "jKDmxZTfFv5xlj3byRSxV8gMDQUirQE4O8hPKvp9EvmIWwCX1S7D/qQo+GhCvfiF3QPdQ3kRCpdvwrTuq+6RBA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "AL2jwdkeW/o9DjLU3vfcqKG9QCqnhKxdPN4i/miR6FIA87v4Y45jFvZw8Ue6hhwkGKs3d1QghJXVlRJFg8VXDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "gV5npKv90ghI2wj2MP06qkVyWTbjBwBzdQnBS3ssggEE+is/BRMQQfKEKpmTAF0KIS+eZj7jmj8b+isxC3QfDw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:47:33.160327005Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:23:25Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": "E624CE5E2693812E58E8DBB64C7A05149A58157114D34F08CB5992FE2BECC0A7", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": 
{ + "height": "4", + "round": 1, + "block_id": { + "hash": "4A71282D7A0FA97B3809C24291E6894081710CDA0264FE31631BD524B8D62CB2", + "part_set_header": { + "total": 1, + "hash": "4A71282D7A0FA97B3809C24291E6894081710CDA0264FE31631BD524B8D62CB2" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:23:25Z", + "signature": "io43cjLaPTzkNYsEpPZhKLkh1YJzM/ZOm0JZI6Qq9KzFZODOPMpSYaitHTHeJV0gIPh/X/29A/QKd62ByAuiBQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:24Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestSuccess.json b/light/mbt/json/MC4_4_faulty_TestSuccess.json new file mode 100644 index 0000000000..9943b44b00 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestSuccess.json @@ -0,0 +1,479 @@ +{ + "description": "MC4_4_faulty_TestSuccess.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + 
"validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D", + "part_set_header": { + "total": 1, + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "8rGIxi7DjBLFlHUo/lAgTpmzsnTZ8HOgnQaIoe+HEM5AmrjBaVDWVMb5/nNAnJTj4hcReCh4jviXcyRkItFJCA==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "3cXnzhzJLKeF47ulcIWjgqsv9JBf9olbAo0mcjo7Ij6TfmCpJO6SmTiacBkiznsFSOc1ZSH+cHDBKA4AT7ozAg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "4O8c5hxoHR861ldolxeY9W1iXCdxYJVIf0xD3+sANSxo0ipXayv8IS7YFw1zzZvDbjRRazVzbfyBYf2jl4JeDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "2Hel7uygQXpjYRJZiwtPLKNxT2Tg1/F5Zzs3VZpleFII9H1e5Gs02UjU0lybSXBKk/tD+NXPsdchrH/6/DmwAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": 
"6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:44:35.160326987Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7", + "part_set_header": { + "total": 1, + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "5RJxkKr19lA4YOg848c0NfTB0qID+klbglOH4iugPMcnjwpsgwP3p+re65uFNe7NNO3D0c5CUQX6bA9TpwO5CQ==" + } + ] + } + }, + 
"validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:19Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "6A1D90F13DCA0E65251D3DA8A07EA17A86CF79E340729DFEF165AC90FF9C2080", + "part_set_header": { + 
"total": 1, + "hash": "6A1D90F13DCA0E65251D3DA8A07EA17A86CF79E340729DFEF165AC90FF9C2080" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "Dg+9iWPS+P6d10RSuIgXKlC5e4IvY4/VU0fsIeCnBk5xRcjnQVy7FObhrDTLdXDo6NVd29h+ypEiLGfwPEa/CA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:02Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "AAFE392AA939DA2A051F3C57707569B1836F93ACC8F35B57BB3CDF615B649013", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "DE957F0FC7A17229F36289714559F7FB5E908DEE04E549FF88DB72404E118581", + "part_set_header": { + "total": 1, + "hash": "DE957F0FC7A17229F36289714559F7FB5E908DEE04E549FF88DB72404E118581" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:02Z", + "signature": 
"rHPemviCweCd95mauh9ST0eW6KsC5A/melokemcZ3gH22+tcIDbLy+vkyXXgpAANKgXcblIkpflI/YJ8IaiJCQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "wUAMEasU8Rry1Z9xa5/VZTUYWHvp41vz0eUir0jl3QjVXqNS+cJgduEvu7e0uZSMjrLf2le8XKXVz2H767Z0Dw==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "+O0Pp6P+CtNt0QzY3YYPBqr2CPcCOXb3CwWR+1xTUMNDkRDLQK8UP12QdHsdqRB8Ocm2+ZKj8OTVv0uUWWPuCA==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "QENfv06GEZj6QY64sPLTnditix/SreqiaFoQxWIpwd6mbHx0sHhk0E6z+nw8MzKssaKE7wD3km3gHEYzKnJNCg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": 
"kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7", + "part_set_header": { + "total": 1, + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "5RJxkKr19lA4YOg848c0NfTB0qID+klbglOH4iugPMcnjwpsgwP3p+re65uFNe7NNO3D0c5CUQX6bA9TpwO5CQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": 
[ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:21Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestUntrustedBeforeTrusted.json b/light/mbt/json/MC4_4_faulty_TestUntrustedBeforeTrusted.json new file mode 100644 index 0000000000..ea57eacc9e --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestUntrustedBeforeTrusted.json @@ -0,0 +1,170 @@ +{ + "description": "MC4_4_faulty_TestUntrustedBeforeTrusted.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D", + 
"part_set_header": { + "total": 1, + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "8rGIxi7DjBLFlHUo/lAgTpmzsnTZ8HOgnQaIoe+HEM5AmrjBaVDWVMb5/nNAnJTj4hcReCh4jviXcyRkItFJCA==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "3cXnzhzJLKeF47ulcIWjgqsv9JBf9olbAo0mcjo7Ij6TfmCpJO6SmTiacBkiznsFSOc1ZSH+cHDBKA4AT7ozAg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "4O8c5hxoHR861ldolxeY9W1iXCdxYJVIf0xD3+sANSxo0ipXayv8IS7YFw1zzZvDbjRRazVzbfyBYf2jl4JeDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "2Hel7uygQXpjYRJZiwtPLKNxT2Tg1/F5Zzs3VZpleFII9H1e5Gs02UjU0lybSXBKk/tD+NXPsdchrH/6/DmwAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": 
"3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:47:47.160327006Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:00Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "730D3D6B2E9F4F0F23879458F2D02E0004F0F241" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "D1E7988F2C5B176E1AE1D7CA03F17A2734B1A90B154D41D0C01FEE49BA63DBAA", + "part_set_header": { + "total": 1, + "hash": "D1E7988F2C5B176E1AE1D7CA03F17A2734B1A90B154D41D0C01FEE49BA63DBAA" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:00Z", + "signature": "lJ/0qcg9/3PcEtnDSR10pswu0kZjfD8GSp03Esc/O6Odg8v20ZFIZCLUEbyFays23MfMpI08bYJrF9QnKjMQAw==" + } + ] + } + }, + "validator_set": { + "validators": [] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:24Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestValsetDifferentAllSteps.json 
b/light/mbt/json/MC4_4_faulty_TestValsetDifferentAllSteps.json new file mode 100644 index 0000000000..03e5734430 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestValsetDifferentAllSteps.json @@ -0,0 +1,371 @@ +{ + "description": "MC4_4_faulty_TestValsetDifferentAllSteps.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "010ED897B4B347175BC54ADF87D640393862FF3D5038302CD523B0E97FC20079", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "42C62AB26BDCD052FD7D87449C1CA700A79780D55E2FC8129614D4D2DC24CB08", + "part_set_header": { + "total": 1, + "hash": "42C62AB26BDCD052FD7D87449C1CA700A79780D55E2FC8129614D4D2DC24CB08" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "mzNheVmshOSGCNfL/NfBBpJcofUx6cqclvEMOc9rZJ6A2pOrxO8ZymXej0FvksZ5mmhfLvZ0aW+as59WMldWBw==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "KisuL/gVSTDQP1Q51uBKd8xDZM4mX+rRKIpMlkfUYF+qW4K51sPvqL/pgKSiUwBPAoGRBzwLoavPg9oiyRwPBA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "fgq+19zjPxTp8HILDBaW8VJg+wzyVkthtmf0HJxdoaXd+uZRQ7LDS2Tn7LXMKAQ9Q0sjtZ4BA3H3sfv9wA56BA==" + }, + { + "block_id_flag": 2, + 
"validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "Zy0rovAtLk58hTcprpXU7ikCdbky5rrQ8Y3o+/Xyo7VTt3zYiCdVsYj26agu8SR3cFkV96P2ryHF6NHWGwIJDw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:47:18.160327003Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "943FD341C1558245A93577E0A7CF48089B9E0FA175DE817A61EF7233AF810BF6", + "part_set_header": { + "total": 1, + "hash": "943FD341C1558245A93577E0A7CF48089B9E0FA175DE817A61EF7233AF810BF6" 
+ } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "Y9RiUiuj/kTgPU1BCNrWbNSHEcyf3nr1o0ohY1xkRf89rYRu34oJSWU65paMAfPAosfeaHHPjYXG2whJk+dGBQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "48A8E428AF500C9BD5674A9A2FC1217DD97B144FD623DDD2C4679022E19A5615", + "part_set_header": { + "total": 1, + "hash": "48A8E428AF500C9BD5674A9A2FC1217DD97B144FD623DDD2C4679022E19A5615" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": 
"81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "WUKkETiWSMDSgd/7sxOD8KgDrL/kg78vXbA2r42+qEvuzZSuwob+7yHXYEn32lDtLl5lnsENVIjtqUrEPkQKBg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "3H9a3YJJjqewYR3HhSMxM3yAy0niBUhWX0+6K67UJVeEtXXVIk/OQJ9HeVmghsayGEJGvzcyjbHDD9CIkk/VDw==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "010ED897B4B347175BC54ADF87D640393862FF3D5038302CD523B0E97FC20079", + "next_validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "consensus_hash": "010ED897B4B347175BC54ADF87D640393862FF3D5038302CD523B0E97FC20079", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + 
"proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "208411D47FC3C56A3243E8BA57010A144BAD926F2FEFFBFDFB695CF19D2788CF", + "part_set_header": { + "total": 1, + "hash": "208411D47FC3C56A3243E8BA57010A144BAD926F2FEFFBFDFB695CF19D2788CF" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "EDJIttaUcyoVcfIyOdHTw6qmtY8Jrf5cEMquCYOxnahu6BUNYbomz8L2t0uscbJqrDzMaW1nGDAyNrIEoBlnDQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "QtatsO+ghgyDEJKDMmoVKdeDT8E3srh7WecyladY0ityBF9TKcrBNBIImCvPlStVu5uUbmM5NbG9+2In/F3DDA==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "RJ9f2beJHCxhuYBHmPc3oWdDlQ8DOfBJOz9vN8tvEmhA0zb2qE9Zxe4jyO7Xr9wvq09yXQShTZKDsjOhOF6GAQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + 
"value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/provider/http/http.go b/light/provider/http/http.go index 6fcf3b85cd..caf139c158 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -51,11 +51,6 @@ func NewWithClient(chainID string, client rpcclient.RemoteClient) provider.Provi } } -// ChainID returns a chainID this provider was configured with. -func (p *http) ChainID() string { - return p.chainID -} - func (p *http) String() string { return fmt.Sprintf("http{%s}", p.client.Remote()) } diff --git a/light/provider/mock/deadmock.go b/light/provider/mock/deadmock.go index 323943e8c2..b220078bc3 100644 --- a/light/provider/mock/deadmock.go +++ b/light/provider/mock/deadmock.go @@ -3,6 +3,7 @@ package mock import ( "context" "errors" + "fmt" "github.com/lazyledger/lazyledger-core/light/provider" "github.com/lazyledger/lazyledger-core/types" @@ -11,17 +12,17 @@ import ( var errNoResp = errors.New("no response from provider") type deadMock struct { - chainID string + id string } -// NewDeadMock creates a mock provider that always errors. -func NewDeadMock(chainID string) provider.Provider { - return &deadMock{chainID: chainID} +// NewDeadMock creates a mock provider that always errors. id is used in case of multiple providers. 
+func NewDeadMock(id string) provider.Provider { + return &deadMock{id: id} } -func (p *deadMock) ChainID() string { return p.chainID } - -func (p *deadMock) String() string { return "deadMock" } +func (p *deadMock) String() string { + return fmt.Sprintf("DeadMock-%s", p.id) +} func (p *deadMock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) { return nil, errNoResp diff --git a/light/provider/mock/mock.go b/light/provider/mock/mock.go index 8674c4f817..9e8310ae8b 100644 --- a/light/provider/mock/mock.go +++ b/light/provider/mock/mock.go @@ -11,7 +11,7 @@ import ( ) type Mock struct { - chainID string + id string headers map[int64]*types.SignedHeader vals map[int64]*types.ValidatorSet evidenceToReport map[string]types.Evidence // hash => evidence @@ -21,20 +21,15 @@ var _ provider.Provider = (*Mock)(nil) // New creates a mock provider with the given set of headers and validator // sets. -func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) *Mock { +func New(id string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) *Mock { return &Mock{ - chainID: chainID, + id: id, headers: headers, vals: vals, evidenceToReport: make(map[string]types.Evidence), } } -// ChainID returns the blockchain ID. 
-func (p *Mock) ChainID() string { - return p.chainID -} - func (p *Mock) String() string { var headers strings.Builder for _, h := range p.headers { @@ -46,7 +41,7 @@ func (p *Mock) String() string { fmt.Fprintf(&vals, " %X", v.Hash()) } - return fmt.Sprintf("Mock{headers: %s, vals: %v}", headers.String(), vals.String()) + return fmt.Sprintf("Mock{id: %s, headers: %s, vals: %v}", p.id, headers.String(), vals.String()) } func (p *Mock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) { diff --git a/light/provider/provider.go b/light/provider/provider.go index 50a47c71f3..47b17c5691 100644 --- a/light/provider/provider.go +++ b/light/provider/provider.go @@ -9,9 +9,6 @@ import ( // Provider provides information for the light client to sync (verification // happens in the client). type Provider interface { - // ChainID returns the blockchain ID. - ChainID() string - // LightBlock returns the LightBlock that corresponds to the given // height. // diff --git a/light/proxy/routes.go b/light/proxy/routes.go index d999495e59..748bd19426 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -3,6 +3,7 @@ package proxy import ( "github.com/lazyledger/lazyledger-core/libs/bytes" lrpc "github.com/lazyledger/lazyledger-core/light/rpc" + rpcclient "github.com/lazyledger/lazyledger-core/rpc/client" ctypes "github.com/lazyledger/lazyledger-core/rpc/core/types" rpcserver "github.com/lazyledger/lazyledger-core/rpc/jsonrpc/server" rpctypes "github.com/lazyledger/lazyledger-core/rpc/jsonrpc/types" @@ -213,11 +214,17 @@ func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc { } } -type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) +type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, + data bytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) func makeABCIQueryFunc(c *lrpc.Client) 
rpcABCIQueryFunc { - return func(ctx *rpctypes.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQuery(ctx.Context(), path, data) + return func(ctx *rpctypes.Context, path string, data bytes.HexBytes, + height int64, prove bool) (*ctypes.ResultABCIQuery, error) { + + return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{ + Height: height, + Prove: prove, + }) } } diff --git a/light/store/db/db.go b/light/store/db/db.go index c2ebcf594f..de37a1c319 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -238,7 +238,7 @@ func (s *dbs) Prune(size uint16) error { append(s.lbKey(1<<63-1), byte(0x00)), ) if err != nil { - return err + panic(err) } defer itr.Close() diff --git a/light/verifier.go b/light/verifier.go index 8eaef2003e..2a45001c95 100644 --- a/light/verifier.go +++ b/light/verifier.go @@ -29,6 +29,7 @@ var ( // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future. +// trustedHeader must have a ChainID, Height and Time func VerifyNonAdjacent( trustedHeader *types.SignedHeader, // height=X trustedVals *types.ValidatorSet, // height=X or height=X+1 @@ -39,10 +40,16 @@ func VerifyNonAdjacent( maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { + checkRequiredHeaderFields(trustedHeader) + if untrustedHeader.Height == trustedHeader.Height+1 { return errors.New("headers must be non adjacent in height") } + if err := ValidateTrustLevel(trustLevel); err != nil { + return err + } + if HeaderExpired(trustedHeader, trustingPeriod, now) { return ErrOldHeaderExpired{trustedHeader.Time.Add(trustingPeriod), now} } @@ -54,14 +61,15 @@ func VerifyNonAdjacent( return ErrInvalidHeader{err} } - // Ensure that +`trustLevel` (default 1/3) or more of last trusted validators signed correctly. + // Ensure that +`trustLevel` (default 1/3) or more in voting power of the last trusted validator + // set signed correctly. 
err := trustedVals.VerifyCommitLightTrusting(trustedHeader.ChainID, untrustedHeader.Commit, trustLevel) if err != nil { switch e := err.(type) { case types.ErrNotEnoughVotingPowerSigned: return ErrNewValSetCantBeTrusted{e} default: - return e + return ErrInvalidHeader{e} } } @@ -90,6 +98,7 @@ func VerifyNonAdjacent( // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future. +// trustedHeader must have a ChainID, Height, Time and NextValidatorsHash func VerifyAdjacent( trustedHeader *types.SignedHeader, // height=X untrustedHeader *types.SignedHeader, // height=X+1 @@ -98,6 +107,12 @@ func VerifyAdjacent( now time.Time, maxClockDrift time.Duration) error { + checkRequiredHeaderFields(trustedHeader) + + if len(trustedHeader.NextValidatorsHash) == 0 { + panic("next validators hash in trusted header is empty") + } + if untrustedHeader.Height != trustedHeader.Height+1 { return errors.New("headers must be adjacent in height") } @@ -119,7 +134,7 @@ func VerifyAdjacent( trustedHeader.NextValidatorsHash, untrustedHeader.ValidatorsHash, ) - return err + return ErrInvalidHeader{err} } // Ensure that +2/3 of new validators signed correctly. 
@@ -150,47 +165,6 @@ func Verify( return VerifyAdjacent(trustedHeader, untrustedHeader, untrustedVals, trustingPeriod, now, maxClockDrift) } -func verifyNewHeaderAndVals( - untrustedHeader *types.SignedHeader, - untrustedVals *types.ValidatorSet, - trustedHeader *types.SignedHeader, - now time.Time, - maxClockDrift time.Duration) error { - - if err := untrustedHeader.ValidateBasic(trustedHeader.ChainID); err != nil { - return fmt.Errorf("untrustedHeader.ValidateBasic failed: %w", err) - } - - if untrustedHeader.Height <= trustedHeader.Height { - return fmt.Errorf("expected new header height %d to be greater than one of old header %d", - untrustedHeader.Height, - trustedHeader.Height) - } - - if !untrustedHeader.Time.After(trustedHeader.Time) { - return fmt.Errorf("expected new header time %v to be after old header time %v", - untrustedHeader.Time, - trustedHeader.Time) - } - - if !untrustedHeader.Time.Before(now.Add(maxClockDrift)) { - return fmt.Errorf("new header has a time from the future %v (now: %v; max clock drift: %v)", - untrustedHeader.Time, - now, - maxClockDrift) - } - - if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { - return fmt.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d", - untrustedHeader.ValidatorsHash, - untrustedVals.Hash(), - untrustedHeader.Height, - ) - } - - return nil -} - // ValidateTrustLevel checks that trustLevel is within the allowed range [1/3, // 1]. If not, it returns an error. 1/3 is the minimum amount of trust needed // which does not break the security model. @@ -217,14 +191,16 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time // c) that the LastBlockID hash of the trusted header is the same as the hash // of the trusted header // -// For any of these cases ErrInvalidHeader is returned. +// For any of these cases ErrInvalidHeader is returned. +// NOTE: This does not check whether the trusted header has expired or not. 
func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { if err := untrustedHeader.ValidateBasic(); err != nil { return ErrInvalidHeader{err} } if untrustedHeader.ChainID != trustedHeader.ChainID { - return ErrInvalidHeader{errors.New("header belongs to another chain")} + return ErrInvalidHeader{fmt.Errorf("new header belongs to a different chain (%s != %s)", + untrustedHeader.ChainID, trustedHeader.ChainID)} } if !untrustedHeader.Time.Before(trustedHeader.Time) { @@ -243,3 +219,59 @@ func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { return nil } + +func verifyNewHeaderAndVals( + untrustedHeader *types.SignedHeader, + untrustedVals *types.ValidatorSet, + trustedHeader *types.SignedHeader, + now time.Time, + maxClockDrift time.Duration) error { + + if err := untrustedHeader.ValidateBasic(trustedHeader.ChainID); err != nil { + return fmt.Errorf("untrustedHeader.ValidateBasic failed: %w", err) + } + + if untrustedHeader.Height <= trustedHeader.Height { + return fmt.Errorf("expected new header height %d to be greater than one of old header %d", + untrustedHeader.Height, + trustedHeader.Height) + } + + if !untrustedHeader.Time.After(trustedHeader.Time) { + return fmt.Errorf("expected new header time %v to be after old header time %v", + untrustedHeader.Time, + trustedHeader.Time) + } + + if !untrustedHeader.Time.Before(now.Add(maxClockDrift)) { + return fmt.Errorf("new header has a time from the future %v (now: %v; max clock drift: %v)", + untrustedHeader.Time, + now, + maxClockDrift) + } + + if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { + return fmt.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d", + untrustedHeader.ValidatorsHash, + untrustedVals.Hash(), + untrustedHeader.Height, + ) + } + + return nil +} + +func checkRequiredHeaderFields(h *types.SignedHeader) { + if h.Height == 0 { + panic("height in trusted header must be set (non zero)") + } + + 
zeroTime := time.Time{} + if h.Time == zeroTime { + panic("time in trusted header must be set") + } + + if h.ChainID == "" { + panic("chain ID in trusted header must be set") + } +} diff --git a/light/verifier_test.go b/light/verifier_test.go index e694255b4e..133ee88d26 100644 --- a/light/verifier_test.go +++ b/light/verifier_test.go @@ -318,12 +318,10 @@ func TestValidateTrustLevel(t *testing.T) { 4: {tmmath.Fraction{Numerator: 4, Denominator: 5}, true}, // invalid - 5: {tmmath.Fraction{Numerator: 6, Denominator: 5}, false}, - 6: {tmmath.Fraction{Numerator: -1, Denominator: 3}, false}, - 7: {tmmath.Fraction{Numerator: 0, Denominator: 1}, false}, - 8: {tmmath.Fraction{Numerator: -1, Denominator: -3}, false}, - 9: {tmmath.Fraction{Numerator: 0, Denominator: 0}, false}, - 10: {tmmath.Fraction{Numerator: 1, Denominator: 0}, false}, + 5: {tmmath.Fraction{Numerator: 6, Denominator: 5}, false}, + 6: {tmmath.Fraction{Numerator: 0, Denominator: 1}, false}, + 7: {tmmath.Fraction{Numerator: 0, Denominator: 0}, false}, + 8: {tmmath.Fraction{Numerator: 1, Denominator: 0}, false}, } for _, tc := range testCases { diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index dfadac4146..1f18c72d52 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -3,6 +3,7 @@ package mempool import ( "bytes" "container/list" + "context" "crypto/sha256" "fmt" "sync" @@ -185,7 +186,7 @@ func (mem *CListMempool) TxsBytes() int64 { // Lock() must be help by the caller during execution. func (mem *CListMempool) FlushAppConn() error { - return mem.proxyAppConn.FlushSync() + return mem.proxyAppConn.FlushSync(context.Background()) } // XXX: Unsafe! Calling Flush may leave mempool in inconsistent state. 
@@ -285,7 +286,16 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx return ErrTxInCache } - reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}) + ctx := context.Background() + if txInfo.Context != nil { + ctx = txInfo.Context + } + + reqRes, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) + if err != nil { + mem.cache.Remove(tx) + return err + } reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb)) return nil @@ -634,17 +644,26 @@ func (mem *CListMempool) recheckTxs() { mem.recheckCursor = mem.txs.Front() mem.recheckEnd = mem.txs.Back() + ctx := context.Background() + // Push txs to proxyAppConn // NOTE: globalCb may be called concurrently. for e := mem.txs.Front(); e != nil; e = e.Next() { memTx := e.Value.(*mempoolTx) - mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{ + _, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ Tx: memTx.tx, Type: abci.CheckTxType_Recheck, }) + if err != nil { + // No need in retrying since memTx will be rechecked after next block. 
+ mem.logger.Error("Can't check tx", "err", err) + } } - mem.proxyAppConn.FlushAsync() + _, err := mem.proxyAppConn.FlushAsync(ctx) + if err != nil { + mem.logger.Error("Can't flush txs", "err", err) + } } //-------------------------------------------------------------------------------- diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 44bfac5712..706c7adabc 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -1,6 +1,7 @@ package mempool import ( + "context" "crypto/rand" "crypto/sha256" "encoding/binary" @@ -121,7 +122,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { {20, 0, -1, 0}, {20, 0, 10, 0}, {20, 10, 10, 0}, - {20, 28, 10, 1}, // account for overhead in Data{} + {20, 28, 10, 1}, {20, 240, 5, 5}, {20, 240, -1, 10}, {20, 240, 10, 10}, @@ -167,6 +168,7 @@ func TestMempoolFilters(t *testing.T) { {10, PreCheckMaxBytes(10), PostCheckMaxGas(20), 0}, {10, PreCheckMaxBytes(30), PostCheckMaxGas(20), 10}, {10, PreCheckMaxBytes(28), PostCheckMaxGas(1), 10}, + {10, PreCheckMaxBytes(28), PostCheckMaxGas(1), 10}, {10, PreCheckMaxBytes(22), PostCheckMaxGas(0), 0}, } for tcIndex, tt := range tests { @@ -313,11 +315,12 @@ func TestSerialReap(t *testing.T) { } commitRange := func(start, end int) { + ctx := context.Background() // Deliver some txs. 
for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes}) + res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) if err != nil { t.Errorf("client error committing tx: %v", err) } @@ -326,7 +329,7 @@ func TestSerialReap(t *testing.T) { res.Code, res.Data, res.Log) } } - res, err := appConnCon.CommitSync() + res, err := appConnCon.CommitSync(ctx) if err != nil { t.Errorf("client error committing: %v", err) } @@ -520,10 +523,11 @@ func TestMempoolTxsBytes(t *testing.T) { t.Error(err) } }) - res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes}) + ctx := context.Background() + res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) require.NoError(t, err) require.EqualValues(t, 0, res.Code) - res2, err := appConnCon.CommitSync() + res2, err := appConnCon.CommitSync(ctx) require.NoError(t, err) require.NotEmpty(t, res2.Data) diff --git a/mempool/mempool.go b/mempool/mempool.go index 27782edc5e..3b921a72aa 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -1,6 +1,7 @@ package mempool import ( + "context" "fmt" abci "github.com/lazyledger/lazyledger-core/abci/types" @@ -98,6 +99,8 @@ type TxInfo struct { SenderID uint16 // SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging. SenderP2PID p2p.ID + // Context is the optional context to cancel CheckTx + Context context.Context } //-------------------------------------------------------------------------------- diff --git a/mempool/reactor.go b/mempool/reactor.go index 777b4a20b4..182068dc18 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -160,10 +160,12 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received transactions to the mempool. +// XXX: do not call any methods that can block or incur heavy processing. 
+// https://github.com/tendermint/tendermint/issues/2888 func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { msg, err := memR.decodeMsg(msgBytes) if err != nil { - memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) memR.Switch.StopPeerForError(src, err) return } diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh index 972ae60657..07382ba713 100644 --- a/networks/remote/integration.sh +++ b/networks/remote/integration.sh @@ -10,8 +10,8 @@ sudo apt-get upgrade -y sudo apt-get install -y jq unzip python-pip software-properties-common make # get and unpack golang -curl -O https://dl.google.com/go/go1.14.4.linux-amd64.tar.gz -tar -xvf go1.14.4.linux-amd64.tar.gz +curl -O https://dl.google.com/go/go1.15.4.linux-amd64.tar.gz +tar -xvf go1.15.4.linux-amd64.tar.gz ## move binary and add to path mv go /usr/local diff --git a/node/node.go b/node/node.go index a615a4de1c..cbc9c21335 100644 --- a/node/node.go +++ b/node/node.go @@ -91,8 +91,13 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) } + pval, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + if err != nil { + return nil, err + } + return NewNode(config, - privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), + pval, nodeKey, proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), @@ -122,7 +127,7 @@ func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { // Option sets a parameter for the node. type Option func(*Node) -// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors. 
+// Temporary interface for switching to fast sync, we should get rid of v0. // See: https://github.com/tendermint/tendermint/issues/4595 type fastSyncReactor interface { SwitchToFastSync(sm.State) error @@ -179,7 +184,7 @@ type Node struct { sw *p2p.Switch // p2p connections addrBook pex.AddrBook // known peers nodeInfo p2p.NodeInfo - nodeKey *p2p.NodeKey // our node privkey + nodeKey p2p.NodeKey // our node privkey isListening bool // services @@ -406,7 +411,7 @@ func createConsensusReactor(config *cfg.Config, func createTransport( config *cfg.Config, nodeInfo p2p.NodeInfo, - nodeKey *p2p.NodeKey, + nodeKey p2p.NodeKey, proxyApp proxy.AppConns, ) ( *p2p.MultiplexTransport, @@ -414,7 +419,7 @@ func createTransport( ) { var ( mConnConfig = p2p.MConnConfig(config.P2P) - transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) + transport = p2p.NewMultiplexTransport(nodeInfo, nodeKey, mConnConfig) connFilters = []p2p.ConnFilterFunc{} peerFilters = []p2p.PeerFilterFunc{} ) @@ -430,7 +435,7 @@ func createTransport( connFilters, // ABCI query for address filtering. func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { - res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ + res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), }) if err != nil { @@ -448,7 +453,7 @@ func createTransport( peerFilters, // ABCI query for ID filtering. 
func(_ p2p.IPeerSet, p p2p.Peer) error { - res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ + res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), }) if err != nil { @@ -478,11 +483,11 @@ func createSwitch(config *cfg.Config, peerFilters []p2p.PeerFilterFunc, mempoolReactor *mempl.Reactor, bcReactor p2p.Reactor, - stateSyncReactor *statesync.Reactor, + stateSyncReactor *p2p.ReactorShim, consensusReactor *cs.Reactor, evidenceReactor *evidence.Reactor, nodeInfo p2p.NodeInfo, - nodeKey *p2p.NodeKey, + nodeKey p2p.NodeKey, p2pLogger log.Logger) *p2p.Switch { sw := p2p.NewSwitch( @@ -501,26 +506,26 @@ func createSwitch(config *cfg.Config, sw.SetNodeInfo(nodeInfo) sw.SetNodeKey(nodeKey) - p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile()) + p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile()) return sw } func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) { + p2pLogger log.Logger, nodeKey p2p.NodeKey) (pex.AddrBook, error) { addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) // Add ourselves to addrbook to prevent dialing ourselves if config.P2P.ExternalAddress != "" { - addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress)) + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ExternalAddress)) if err != nil { return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) } addrBook.AddOurAddress(addr) } if config.P2P.ListenAddress != "" { - addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress)) + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ListenAddress)) if err != nil { return nil, 
fmt.Errorf("p2p.laddr is incorrect: %w", err) } @@ -612,7 +617,7 @@ func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reacto // NewNode returns a new, ready to go, Tendermint Node. func NewNode(config *cfg.Config, privValidator types.PrivValidator, - nodeKey *p2p.NodeKey, + nodeKey p2p.NodeKey, clientCreator proxy.ClientCreator, genesisDocProvider GenesisDocProvider, dbProvider DBProvider, @@ -741,9 +746,18 @@ func NewNode(config *cfg.Config, // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 - stateSyncReactor := statesync.NewReactor(proxyApp.Snapshot(), proxyApp.Query(), - config.StateSync.TempDir) - stateSyncReactor.SetLogger(logger.With("module", "statesync")) + stateSyncReactorShim := p2p.NewReactorShim("StateSyncShim", statesync.ChannelShims) + stateSyncReactorShim.SetLogger(logger.With("module", "statesync")) + + stateSyncReactor := statesync.NewReactor( + stateSyncReactorShim.Logger, + proxyApp.Snapshot(), + proxyApp.Query(), + stateSyncReactorShim.GetChannel(statesync.SnapshotChannel), + stateSyncReactorShim.GetChannel(statesync.ChunkChannel), + stateSyncReactorShim.PeerUpdates, + config.StateSync.TempDir, + ) nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) if err != nil { @@ -757,7 +771,7 @@ func NewNode(config *cfg.Config, p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, - stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, + stateSyncReactorShim, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, ) err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) @@ -864,7 +878,7 @@ func (n *Node) OnStart() error { } // Start the transport. 
- addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress)) + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID, n.config.P2P.ListenAddress)) if err != nil { return err } @@ -887,6 +901,11 @@ func (n *Node) OnStart() error { return err } + // Start the real state sync reactor separately since the switch uses the shim. + if err := n.stateSyncReactor.Start(); err != nil { + return err + } + // Always connect to persistent peers err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) if err != nil { @@ -928,6 +947,11 @@ func (n *Node) OnStop() { n.Logger.Error("Error closing switch", "err", err) } + // Stop the real state sync reactor separately since the switch uses the shim. + if err := n.stateSyncReactor.Stop(); err != nil { + n.Logger.Error("failed to stop state sync service", "err", err) + } + // stop mempool WAL if n.config.Mempool.WalEnabled() { n.mempool.CloseWAL() @@ -1216,7 +1240,7 @@ func (n *Node) NodeInfo() p2p.NodeInfo { func makeNodeInfo( config *cfg.Config, - nodeKey *p2p.NodeKey, + nodeKey p2p.NodeKey, txIndexer txindex.TxIndexer, genDoc *types.GenesisDoc, state sm.State, @@ -1242,7 +1266,7 @@ func makeNodeInfo( state.Version.Consensus.Block, state.Version.Consensus.App, ), - DefaultNodeID: nodeKey.ID(), + DefaultNodeID: nodeKey.ID, Network: genDoc.ChainID, Version: version.TMCoreSemVer, Channels: []byte{ @@ -1250,7 +1274,7 @@ func makeNodeInfo( cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, mempl.MempoolChannel, evidence.EvidenceChannel, - statesync.SnapshotChannel, statesync.ChunkChannel, + byte(statesync.SnapshotChannel), byte(statesync.ChunkChannel), }, Moniker: config.Moniker, Other: p2p.DefaultNodeInfoOther{ diff --git a/node/node_test.go b/node/node_test.go index 6f440c66ec..e42a4bbb6f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -3,6 +3,7 @@ package node import ( "context" "fmt" + "math" "net" "os" "syscall" @@ 
-16,7 +17,9 @@ import ( "github.com/lazyledger/lazyledger-core/abci/example/kvstore" cfg "github.com/lazyledger/lazyledger-core/config" + "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/ed25519" + "github.com/lazyledger/lazyledger-core/crypto/tmhash" "github.com/lazyledger/lazyledger-core/evidence" "github.com/lazyledger/lazyledger-core/libs/log" tmrand "github.com/lazyledger/lazyledger-core/libs/rand" @@ -230,23 +233,22 @@ func TestCreateProposalBlock(t *testing.T) { logger := log.TestingLogger() - var height int64 = 1 + const height int64 = 1 state, stateDB, privVals := state(1, height) stateStore := sm.NewStore(stateDB) maxBytes := 16384 - var partSize uint32 = 256 + const partSize uint32 = 256 maxEvidenceBytes := int64(maxBytes / 2) state.ConsensusParams.Block.MaxBytes = int64(maxBytes) state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - memplMetrics := mempl.PrometheusMetrics("node_test_1") mempool := mempl.NewCListMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempl.WithMetrics(memplMetrics), + mempl.WithMetrics(mempl.NopMetrics()), mempl.WithPreCheck(sm.TxPreCheck(state)), mempl.WithPostCheck(sm.TxPostCheck(state)), ) @@ -265,10 +267,15 @@ func TestCreateProposalBlock(t *testing.T) { for currentBytes <= maxEvidenceBytes { ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") currentBytes += int64(len(ev.Bytes())) - err := evidencePool.AddEvidenceFromConsensus(ev, time.Now(), state.Validators) + err := evidencePool.AddEvidenceFromConsensus(ev) require.NoError(t, err) } + evList, size := evidencePool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) + require.Less(t, size, state.ConsensusParams.Evidence.MaxBytes+1) + evData := &types.EvidenceData{Evidence: evList} + require.EqualValues(t, size, 
evData.ByteSize()) + // fill the mempool with more txs // than can fit in a block txLength := 100 @@ -311,7 +318,7 @@ func TestCreateProposalBlock(t *testing.T) { assert.NoError(t, err) } -func TestMaxProposalBlockSize(t *testing.T) { +func TestMaxTxsProposalBlockSize(t *testing.T) { config := cfg.ResetTestRoot("node_create_proposal") defer os.RemoveAll(config.RootDir) cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) @@ -322,21 +329,20 @@ func TestMaxProposalBlockSize(t *testing.T) { logger := log.TestingLogger() - var height int64 = 1 + const height int64 = 1 state, stateDB, _ := state(1, height) stateStore := sm.NewStore(stateDB) - var maxBytes int64 = 16384 - var partSize uint32 = 256 + const maxBytes int64 = 16384 + const partSize uint32 = 256 state.ConsensusParams.Block.MaxBytes = maxBytes proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - memplMetrics := mempl.PrometheusMetrics("node_test_2") mempool := mempl.NewCListMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, - mempl.WithMetrics(memplMetrics), + mempl.WithMetrics(mempl.NopMetrics()), mempl.WithPreCheck(sm.TxPreCheck(state)), mempl.WithPostCheck(sm.TxPostCheck(state)), ) @@ -344,7 +350,7 @@ func TestMaxProposalBlockSize(t *testing.T) { // fill the mempool with one txs just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) - tx := tmrand.Bytes(txLength - 4 - 5) // to account for the varint and the fields in Data{} + tx := tmrand.Bytes(txLength - 4 - 5) // to account for the varint err = mempool.CheckTx(tx, nil, mempl.TxInfo{}) assert.NoError(t, err) @@ -374,6 +380,119 @@ func TestMaxProposalBlockSize(t *testing.T) { assert.EqualValues(t, partSet.ByteSize(), int64(pb.Size())) } +func TestMaxProposalBlockSize(t *testing.T) { + config := cfg.ResetTestRoot("node_create_proposal") + defer os.RemoveAll(config.RootDir) + cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc) + err := 
proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() //nolint:errcheck // ignore for tests + + logger := log.TestingLogger() + + state, stateDB, _ := state(types.MaxVotesCount, int64(1)) + stateStore := sm.NewStore(stateDB) + const maxBytes int64 = 1024 * 1024 * 2 + state.ConsensusParams.Block.MaxBytes = maxBytes + proposerAddr, _ := state.Validators.GetByIndex(0) + + // Make Mempool + mempool := mempl.NewCListMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempl.WithMetrics(mempl.NopMetrics()), + mempl.WithPreCheck(sm.TxPreCheck(state)), + mempl.WithPostCheck(sm.TxPostCheck(state)), + ) + mempool.SetLogger(logger) + + // fill the mempool with one txs just below the maximum size + txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) + tx := tmrand.Bytes(txLength - 6 - 4) // to account for the varint + err = mempool.CheckTx(tx, nil, mempl.TxInfo{}) + assert.NoError(t, err) + // now produce more txs than what a normal block can hold with 10 smaller txs + // At the end of the test, only the single big tx should be added + for i := 0; i < 10; i++ { + tx := tmrand.Bytes(10) + err = mempool.CheckTx(tx, nil, mempl.TxInfo{}) + assert.NoError(t, err) + } + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp.Consensus(), + mempool, + sm.EmptyEvidencePool{}, + ) + + blockID := types.BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: types.PartSetHeader{ + Total: math.MaxInt32, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + } + + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + // change state in order to produce the largest accepted header + state.LastBlockID = blockID + state.LastBlockHeight = math.MaxInt64 - 1 + state.LastBlockTime = timestamp + state.LastResultsHash = tmhash.Sum([]byte("last_results_hash")) + state.AppHash = tmhash.Sum([]byte("app_hash")) + state.Version.Consensus.Block = math.MaxInt64 + 
state.Version.Consensus.App = math.MaxInt64 + maxChainID := "" + for i := 0; i < types.MaxChainIDLen; i++ { + maxChainID += "𠜎" + } + state.ChainID = maxChainID + + cs := types.CommitSig{ + BlockIDFlag: types.BlockIDFlagNil, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + Timestamp: timestamp, + Signature: crypto.CRandBytes(types.MaxSignatureSize), + } + + commit := &types.Commit{ + Height: math.MaxInt64, + Round: math.MaxInt32, + BlockID: blockID, + } + + // add maximum amount of signatures to a single commit + for i := 0; i < types.MaxVotesCount; i++ { + commit.Signatures = append(commit.Signatures, cs) + } + + block, partSet := blockExec.CreateProposalBlock( + math.MaxInt64, + state, commit, + proposerAddr, + ) + + // this ensures that the header is at max size + block.Header.Time = timestamp + + pb, err := block.ToProto() + require.NoError(t, err) + + // require that the header and commit be the max possible size + require.Equal(t, int64(pb.Header.Size()), types.MaxHeaderBytes) + require.Equal(t, int64(pb.LastCommit.Size()), types.MaxCommitBytes(types.MaxVotesCount)) + // make sure that the block is less than the max possible size + assert.LessOrEqual(t, maxBytes, int64(pb.Size())) + // because of the proto overhead we expect the part set bytes to be equal or + // less than the pb block size + assert.LessOrEqual(t, partSet.ByteSize(), int64(pb.Size())) + +} + func TestNodeNewNodeCustomReactors(t *testing.T) { config := cfg.ResetTestRoot("node_new_node_custom_reactors_test") defer os.RemoveAll(config.RootDir) @@ -383,9 +502,11 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) require.NoError(t, err) + pval, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + require.NoError(t, err) n, err := NewNode(config, - privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), + pval, nodeKey, 
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index f5cfb9df41..0539ac0ba3 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -44,6 +44,9 @@ type Reactor interface { // copying. // // CONTRACT: msgBytes are not nil. + // + // XXX: do not call any methods that can block or incur heavy processing. + // https://github.com/tendermint/tendermint/issues/2888 Receive(chID byte, peer Peer, msgBytes []byte) } diff --git a/p2p/channel.go b/p2p/channel.go new file mode 100644 index 0000000000..54c076a6ba --- /dev/null +++ b/p2p/channel.go @@ -0,0 +1,120 @@ +package p2p + +import ( + "sync" + + "github.com/gogo/protobuf/proto" +) + +// ChannelID is an arbitrary channel ID. +type ChannelID uint16 + +// Envelope specifies the message receiver and sender. +type Envelope struct { + From PeerID // Message sender, or empty for outbound messages. + To PeerID // Message receiver, or empty for inbound messages. + Broadcast bool // Send message to all connected peers, ignoring To. + Message proto.Message // Payload. +} + +// Channel is a bidirectional channel for Protobuf message exchange with peers. +// A Channel is safe for concurrent use by multiple goroutines. +type Channel struct { + closeOnce sync.Once + + // id defines the unique channel ID. + id ChannelID + + // messageType specifies the type of messages exchanged via the channel, and + // is used e.g. for automatic unmarshaling. + messageType proto.Message + + // inCh is a channel for receiving inbound messages. Envelope.From is always + // set. + inCh chan Envelope + + // outCh is a channel for sending outbound messages. Envelope.To or Broadcast + // must be set, otherwise the message is discarded. + outCh chan Envelope + + // errCh is a channel for reporting peer errors to the router, typically used + // when peers send an invalid or malignant message. 
+ errCh chan PeerError + + // doneCh is used to signal that a Channel is closed. A Channel is bi-directional + // and should be closed by the reactor, where as the router is responsible + // for explicitly closing the internal In channel. + doneCh chan struct{} +} + +// NewChannel returns a reference to a new p2p Channel. It is the reactor's +// responsibility to close the Channel. After a channel is closed, the router may +// safely and explicitly close the internal In channel. +func NewChannel(id ChannelID, mType proto.Message, in, out chan Envelope, errCh chan PeerError) *Channel { + return &Channel{ + id: id, + messageType: mType, + inCh: in, + outCh: out, + errCh: errCh, + doneCh: make(chan struct{}), + } +} + +// ID returns the Channel's ID. +func (c *Channel) ID() ChannelID { + return c.id +} + +// In returns a read-only inbound go channel. This go channel should be used by +// reactors to consume Envelopes sent from peers. +func (c *Channel) In() <-chan Envelope { + return c.inCh +} + +// Out returns a write-only outbound go channel. This go channel should be used +// by reactors to route Envelopes to other peers. +func (c *Channel) Out() chan<- Envelope { + return c.outCh +} + +// Error returns a write-only outbound go channel designated for peer errors only. +// This go channel should be used by reactors to send peer errors when consuming +// Envelopes sent from other peers. +func (c *Channel) Error() chan<- PeerError { + return c.errCh +} + +// Close closes the outbound channel and marks the Channel as done. Internally, +// the outbound outCh and peer error errCh channels are closed. It is the reactor's +// responsibility to invoke Close. Any send on the Out or Error channel will +// panic after the Channel is closed. +// +// NOTE: After a Channel is closed, the router may safely assume it can no longer +// send on the internal inCh, however it should NEVER explicitly close it as +// that could result in panics by sending on a closed channel. 
+func (c *Channel) Close() { + c.closeOnce.Do(func() { + close(c.doneCh) + close(c.outCh) + close(c.errCh) + }) +} + +// Done returns the Channel's internal channel that should be used by a router +// to signal when it is safe to send on the internal inCh go channel. +func (c *Channel) Done() <-chan struct{} { + return c.doneCh +} + +// Wrapper is a Protobuf message that can contain a variety of inner messages. +// If a Channel's message type implements Wrapper, the channel will +// automatically (un)wrap passed messages using the container type, such that +// the channel can transparently support multiple message types. +type Wrapper interface { + // Wrap will take a message and wrap it in this one. + Wrap(proto.Message) error + + // Unwrap will unwrap the inner message contained in this message. + Unwrap() (proto.Message, error) +} diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 16d312fd32..494fbf8023 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -45,13 +45,12 @@ func createMConnectionWithCallbacks( func TestMConnectionSendFlushStop(t *testing.T) { server, client := NetPipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) clientConn := createTestMConnection(client) err := clientConn.Start() require.Nil(t, err) - defer clientConn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, clientConn)) msg := []byte("abc") assert.True(t, clientConn.Send(0x01, msg)) @@ -83,13 +82,12 @@ func TestMConnectionSendFlushStop(t *testing.T) { func TestMConnectionSend(t *testing.T) { server, client := NetPipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) msg := []byte("Ant-Man") assert.True(t, mconn.Send(0x01, msg)) @@ -114,8 +112,7 @@ func 
TestMConnectionSend(t *testing.T) { func TestMConnectionReceive(t *testing.T) { server, client := NetPipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -128,12 +125,12 @@ func TestMConnectionReceive(t *testing.T) { mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn1.Start() require.Nil(t, err) - defer mconn1.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn1)) mconn2 := createTestMConnection(server) err = mconn2.Start() require.Nil(t, err) - defer mconn2.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn2)) msg := []byte("Cyclops") assert.True(t, mconn2.Send(0x01, msg)) @@ -150,13 +147,12 @@ func TestMConnectionReceive(t *testing.T) { func TestMConnectionStatus(t *testing.T) { server, client := NetPipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) status := mconn.Status() assert.NotNil(t, status) @@ -165,8 +161,7 @@ func TestMConnectionStatus(t *testing.T) { func TestMConnectionPongTimeoutResultsInError(t *testing.T) { server, client := net.Pipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -179,7 +174,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) serverGotPing := make(chan struct{}) go func() { @@ -204,8 +199,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { 
server, client := net.Pipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -218,7 +212,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) // sending 3 pongs in a row (abuse) protoWriter := protoio.NewDelimitedWriter(server) @@ -259,8 +253,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { func TestMConnectionMultiplePings(t *testing.T) { server, client := net.Pipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -273,7 +266,7 @@ func TestMConnectionMultiplePings(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) // sending 3 pings in a row (abuse) // see https://github.com/tendermint/tendermint/issues/1190 @@ -304,12 +297,10 @@ func TestMConnectionMultiplePings(t *testing.T) { func TestMConnectionPingPongs(t *testing.T) { // check that we are not leaking any go-routines - defer leaktest.CheckTimeout(t, 10*time.Second)() + t.Cleanup(leaktest.CheckTimeout(t, 10*time.Second)) server, client := net.Pipe() - - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -322,7 +313,7 @@ func TestMConnectionPingPongs(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) serverGotPing := make(chan 
struct{}) go func() { @@ -366,8 +357,7 @@ func TestMConnectionPingPongs(t *testing.T) { func TestMConnectionStopsAndReturnsError(t *testing.T) { server, client := NetPipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -380,7 +370,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) if err := client.Close(); err != nil { t.Error(err) @@ -446,18 +436,7 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) { _, err := client.Write([]byte{1, 2, 3, 4, 5}) require.NoError(t, err) assert.True(t, expectSend(chOnErr), "badly encoded msgPacket") - - t.Cleanup(func() { - if err := mconnClient.Stop(); err != nil { - t.Log(err) - } - }) - - t.Cleanup(func() { - if err := mconnServer.Stop(); err != nil { - t.Log(err) - } - }) + t.Cleanup(stopAll(t, mconnClient, mconnServer)) } func TestMConnectionReadErrorUnknownChannel(t *testing.T) { @@ -473,18 +452,7 @@ func TestMConnectionReadErrorUnknownChannel(t *testing.T) { // should cause an error assert.True(t, mconnClient.Send(0x02, msg)) assert.True(t, expectSend(chOnErr), "unknown channel") - - t.Cleanup(func() { - if err := mconnClient.Stop(); err != nil { - t.Log(err) - } - }) - - t.Cleanup(func() { - if err := mconnServer.Stop(); err != nil { - t.Log(err) - } - }) + t.Cleanup(stopAll(t, mconnClient, mconnServer)) } func TestMConnectionReadErrorLongMessage(t *testing.T) { @@ -492,8 +460,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { chOnRcv := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() // nolint:errcheck // ignore for tests - defer mconnServer.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, 
mconnClient, mconnServer)) mconnServer.onReceive = func(chID byte, msgBytes []byte) { chOnRcv <- struct{}{} @@ -528,8 +495,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { chOnErr := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() // nolint:errcheck // ignore for tests - defer mconnServer.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconnClient, mconnServer)) // send msg with unknown msg type _, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"}) @@ -539,13 +505,12 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { func TestMConnectionTrySend(t *testing.T) { server, client := NetPipe() - defer server.Close() - defer client.Close() + t.Cleanup(closeAll(t, client, server)) mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(stopAll(t, mconn)) msg := []byte("Semicolon-Woman") resultCh := make(chan string, 2) @@ -587,3 +552,31 @@ func TestConnVectors(t *testing.T) { require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) } } + +type stopper interface { + Stop() error +} + +func stopAll(t *testing.T, stoppers ...stopper) func() { + return func() { + for _, s := range stoppers { + if err := s.Stop(); err != nil { + t.Log(err) + } + } + } +} + +type closer interface { + Close() error +} + +func closeAll(t *testing.T, closers ...closer) func() { + return func() { + for _, s := range closers { + if err := s.Close(); err != nil { + t.Log(err) + } + } + } +} diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 7fc7a9aba3..a4045185e6 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "log" "os" "path/filepath" @@ -21,7 
+22,6 @@ import ( "github.com/lazyledger/lazyledger-core/crypto/ed25519" "github.com/lazyledger/lazyledger-core/crypto/sr25519" "github.com/lazyledger/lazyledger-core/libs/async" - tmos "github.com/lazyledger/lazyledger-core/libs/os" tmrand "github.com/lazyledger/lazyledger-core/libs/rand" ) @@ -228,14 +228,13 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) data := createGoldenTestVectors(t) - err := tmos.WriteFile(goldenFilepath, []byte(data), 0644) - require.NoError(t, err) + require.NoError(t, ioutil.WriteFile(goldenFilepath, []byte(data), 0644)) } f, err := os.Open(goldenFilepath) if err != nil { log.Fatal(err) } - defer f.Close() + t.Cleanup(closeAll(t, f)) scanner := bufio.NewScanner(f) for scanner.Scan() { line := scanner.Text() @@ -259,8 +258,7 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { func TestNilPubkey(t *testing.T) { var fooConn, barConn = makeKVStoreConnPair() - defer fooConn.Close() - defer barConn.Close() + t.Cleanup(closeAll(t, fooConn, barConn)) var fooPrvKey = ed25519.GenPrivKey() var barPrvKey = privKeyWithNilPubKey{ed25519.GenPrivKey()} @@ -273,8 +271,8 @@ func TestNilPubkey(t *testing.T) { func TestNonEd25519Pubkey(t *testing.T) { var fooConn, barConn = makeKVStoreConnPair() - defer fooConn.Close() - defer barConn.Close() + t.Cleanup(closeAll(t, fooConn, barConn)) + var fooPrvKey = ed25519.GenPrivKey() var barPrvKey = sr25519.GenPrivKey() diff --git a/p2p/fuzz.go b/p2p/fuzz.go deleted file mode 100644 index 652560dfc7..0000000000 --- a/p2p/fuzz.go +++ /dev/null @@ -1,153 +0,0 @@ -package p2p - -import ( - "net" - "time" - - "github.com/lazyledger/lazyledger-core/config" - tmrand "github.com/lazyledger/lazyledger-core/libs/rand" - tmsync "github.com/lazyledger/lazyledger-core/libs/sync" -) - -// FuzzedConnection wraps any net.Conn and depending on the mode either delays -// 
reads/writes or randomly drops reads/writes/connections. -type FuzzedConnection struct { - conn net.Conn - - mtx tmsync.Mutex - start <-chan time.Time - active bool - - config *config.FuzzConnConfig -} - -// FuzzConn creates a new FuzzedConnection. Fuzzing starts immediately. -func FuzzConn(conn net.Conn) net.Conn { - return FuzzConnFromConfig(conn, config.DefaultFuzzConnConfig()) -} - -// FuzzConnFromConfig creates a new FuzzedConnection from a config. Fuzzing -// starts immediately. -func FuzzConnFromConfig(conn net.Conn, config *config.FuzzConnConfig) net.Conn { - return &FuzzedConnection{ - conn: conn, - start: make(<-chan time.Time), - active: true, - config: config, - } -} - -// FuzzConnAfter creates a new FuzzedConnection. Fuzzing starts when the -// duration elapses. -func FuzzConnAfter(conn net.Conn, d time.Duration) net.Conn { - return FuzzConnAfterFromConfig(conn, d, config.DefaultFuzzConnConfig()) -} - -// FuzzConnAfterFromConfig creates a new FuzzedConnection from a config. -// Fuzzing starts when the duration elapses. -func FuzzConnAfterFromConfig( - conn net.Conn, - d time.Duration, - config *config.FuzzConnConfig, -) net.Conn { - return &FuzzedConnection{ - conn: conn, - start: time.After(d), - active: false, - config: config, - } -} - -// Config returns the connection's config. -func (fc *FuzzedConnection) Config() *config.FuzzConnConfig { - return fc.config -} - -// Read implements net.Conn. -func (fc *FuzzedConnection) Read(data []byte) (n int, err error) { - if fc.fuzz() { - return 0, nil - } - return fc.conn.Read(data) -} - -// Write implements net.Conn. -func (fc *FuzzedConnection) Write(data []byte) (n int, err error) { - if fc.fuzz() { - return 0, nil - } - return fc.conn.Write(data) -} - -// Close implements net.Conn. -func (fc *FuzzedConnection) Close() error { return fc.conn.Close() } - -// LocalAddr implements net.Conn. -func (fc *FuzzedConnection) LocalAddr() net.Addr { return fc.conn.LocalAddr() } - -// RemoteAddr implements net.Conn. 
-func (fc *FuzzedConnection) RemoteAddr() net.Addr { return fc.conn.RemoteAddr() } - -// SetDeadline implements net.Conn. -func (fc *FuzzedConnection) SetDeadline(t time.Time) error { return fc.conn.SetDeadline(t) } - -// SetReadDeadline implements net.Conn. -func (fc *FuzzedConnection) SetReadDeadline(t time.Time) error { - return fc.conn.SetReadDeadline(t) -} - -// SetWriteDeadline implements net.Conn. -func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error { - return fc.conn.SetWriteDeadline(t) -} - -func (fc *FuzzedConnection) randomDuration() time.Duration { - maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000) - return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) // nolint: gas -} - -// implements the fuzz (delay, kill conn) -// and returns whether or not the read/write should be ignored -func (fc *FuzzedConnection) fuzz() bool { - if !fc.shouldFuzz() { - return false - } - - switch fc.config.Mode { - case config.FuzzModeDrop: - // randomly drop the r/w, drop the conn, or sleep - r := tmrand.Float64() - switch { - case r <= fc.config.ProbDropRW: - return true - case r < fc.config.ProbDropRW+fc.config.ProbDropConn: - // XXX: can't this fail because machine precision? - // XXX: do we need an error? 
- fc.Close() - return true - case r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep: - time.Sleep(fc.randomDuration()) - } - case config.FuzzModeDelay: - // sleep a bit - time.Sleep(fc.randomDuration()) - } - return false -} - -func (fc *FuzzedConnection) shouldFuzz() bool { - if fc.active { - return true - } - - fc.mtx.Lock() - defer fc.mtx.Unlock() - - select { - case <-fc.start: - fc.active = true - return true - default: - return false - } -} diff --git a/p2p/key.go b/p2p/key.go index 2454e6226f..ae16ed2b07 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -1,9 +1,7 @@ package p2p import ( - "bytes" "encoding/hex" - "fmt" "io/ioutil" "github.com/lazyledger/lazyledger-core/crypto" @@ -26,19 +24,30 @@ const IDByteLength = crypto.AddressSize // NodeKey is the persistent peer key. // It contains the nodes private key for authentication. type NodeKey struct { - PrivKey crypto.PrivKey `json:"priv_key"` // our priv key -} - -// ID returns the peer's canonical ID - the hash of its public key. -func (nodeKey *NodeKey) ID() ID { - return PubKeyToID(nodeKey.PubKey()) + // Canonical ID - hex-encoded pubkey's address (IDByteLength bytes) + ID ID `json:"id"` + // Private key + PrivKey crypto.PrivKey `json:"priv_key"` } // PubKey returns the peer's PubKey -func (nodeKey *NodeKey) PubKey() crypto.PubKey { +func (nodeKey NodeKey) PubKey() crypto.PubKey { return nodeKey.PrivKey.PubKey() } +// SaveAs persists the NodeKey to filePath. +func (nodeKey NodeKey) SaveAs(filePath string) error { + jsonBytes, err := tmjson.Marshal(nodeKey) + if err != nil { + return err + } + err = ioutil.WriteFile(filePath, jsonBytes, 0600) + if err != nil { + return err + } + return nil +} + // PubKeyToID returns the ID corresponding to the given PubKey. // It's the hex-encoding of the pubKey.Address(). func PubKeyToID(pubKey crypto.PubKey) ID { @@ -47,74 +56,44 @@ func PubKeyToID(pubKey crypto.PubKey) ID { // LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. 
If // the file does not exist, it generates and saves a new NodeKey. -func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { +func LoadOrGenNodeKey(filePath string) (NodeKey, error) { if tmos.FileExists(filePath) { nodeKey, err := LoadNodeKey(filePath) if err != nil { - return nil, err + return NodeKey{}, err } return nodeKey, nil } - privKey := ed25519.GenPrivKey() - nodeKey := &NodeKey{ - PrivKey: privKey, - } + nodeKey := GenNodeKey() if err := nodeKey.SaveAs(filePath); err != nil { - return nil, err + return NodeKey{}, err } return nodeKey, nil } -// LoadNodeKey loads NodeKey located in filePath. -func LoadNodeKey(filePath string) (*NodeKey, error) { - jsonBytes, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } - nodeKey := new(NodeKey) - err = tmjson.Unmarshal(jsonBytes, nodeKey) - if err != nil { - return nil, err +// GenNodeKey generates a new node key. +func GenNodeKey() NodeKey { + privKey := ed25519.GenPrivKey() + return NodeKey{ + ID: PubKeyToID(privKey.PubKey()), + PrivKey: privKey, } - return nodeKey, nil } -// SaveAs persists the NodeKey to filePath. -func (nodeKey *NodeKey) SaveAs(filePath string) error { - jsonBytes, err := tmjson.Marshal(nodeKey) +// LoadNodeKey loads NodeKey located in filePath. +func LoadNodeKey(filePath string) (NodeKey, error) { + jsonBytes, err := ioutil.ReadFile(filePath) if err != nil { - return err + return NodeKey{}, err } - err = ioutil.WriteFile(filePath, jsonBytes, 0600) + nodeKey := NodeKey{} + err = tmjson.Unmarshal(jsonBytes, &nodeKey) if err != nil { - return err - } - return nil -} - -//------------------------------------------------------------------------------ - -// MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1. -// It can be used as a Proof of Work target. -// NOTE: targetBits must be a multiple of 8 and difficulty must be less than targetBits. 
-func MakePoWTarget(difficulty, targetBits uint) []byte { - if targetBits%8 != 0 { - panic(fmt.Sprintf("targetBits (%d) not a multiple of 8", targetBits)) - } - if difficulty >= targetBits { - panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits)) + return NodeKey{}, err } - targetBytes := targetBits / 8 - zeroPrefixLen := (int(difficulty) / 8) - prefix := bytes.Repeat([]byte{0}, zeroPrefixLen) - mod := (difficulty % 8) - if mod > 0 { - nonZeroPrefix := byte(1<<(8-mod) - 1) - prefix = append(prefix, nonZeroPrefix) - } - tailLen := int(targetBytes) - len(prefix) - return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...) + nodeKey.ID = PubKeyToID(nodeKey.PubKey()) + return nodeKey, nil } diff --git a/p2p/key_test.go b/p2p/key_test.go index 015b32134f..9f3f631ecd 100644 --- a/p2p/key_test.go +++ b/p2p/key_test.go @@ -1,7 +1,6 @@ package p2p import ( - "bytes" "os" "path/filepath" "testing" @@ -9,7 +8,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/lazyledger/lazyledger-core/crypto/ed25519" tmrand "github.com/lazyledger/lazyledger-core/libs/rand" ) @@ -44,38 +42,8 @@ func TestNodeKeySaveAs(t *testing.T) { assert.NoFileExists(t, filePath) - privKey := ed25519.GenPrivKey() - nodeKey := &NodeKey{ - PrivKey: privKey, - } + nodeKey := GenNodeKey() err := nodeKey.SaveAs(filePath) assert.NoError(t, err) assert.FileExists(t, filePath) } - -//---------------------------------------------------------- - -func padBytes(bz []byte, targetBytes int) []byte { - return append(bz, bytes.Repeat([]byte{0xFF}, targetBytes-len(bz))...) 
-} - -func TestPoWTarget(t *testing.T) { - - targetBytes := 20 - cases := []struct { - difficulty uint - target []byte - }{ - {0, padBytes([]byte{}, targetBytes)}, - {1, padBytes([]byte{127}, targetBytes)}, - {8, padBytes([]byte{0}, targetBytes)}, - {9, padBytes([]byte{0, 127}, targetBytes)}, - {10, padBytes([]byte{0, 63}, targetBytes)}, - {16, padBytes([]byte{0, 0}, targetBytes)}, - {17, padBytes([]byte{0, 0, 127}, targetBytes)}, - } - - for _, c := range cases { - assert.Equal(t, MakePoWTarget(c.difficulty, 20*8), c.target) - } -} diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index d4146bb613..535840b9d8 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -3,7 +3,6 @@ package mock import ( "net" - "github.com/lazyledger/lazyledger-core/crypto/ed25519" "github.com/lazyledger/lazyledger-core/libs/service" "github.com/lazyledger/lazyledger-core/p2p" "github.com/lazyledger/lazyledger-core/p2p/conn" @@ -27,11 +26,11 @@ func NewPeer(ip net.IP) *Peer { } else { netAddr = p2p.NewNetAddressIPPort(ip, 26656) } - nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} - netAddr.ID = nodeKey.ID() + nodeKey := p2p.GenNodeKey() + netAddr.ID = nodeKey.ID mp := &Peer{ ip: ip, - id: nodeKey.ID(), + id: nodeKey.ID, addr: netAddr, kv: make(map[string]interface{}), } diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go index ae6bb3eddf..74faccbce2 100644 --- a/p2p/mocks/peer.go +++ b/p2p/mocks/peer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. 
package mocks diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go index 5aa927845c..bb30a968ae 100644 --- a/p2p/node_info_test.go +++ b/p2p/node_info_test.go @@ -4,8 +4,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/lazyledger/lazyledger-core/crypto/ed25519" ) func TestNodeInfoValidate(t *testing.T) { @@ -66,16 +64,16 @@ func TestNodeInfoValidate(t *testing.T) { {"Good RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = "0.0.0.0:26657" }, false}, } - nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} + nodeKey := GenNodeKey() name := "testing" // test case passes - ni = testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) + ni = testNodeInfo(nodeKey.ID, name).(DefaultNodeInfo) ni.Channels = channels assert.NoError(t, ni.Validate()) for _, tc := range testCases { - ni := testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) + ni := testNodeInfo(nodeKey.ID, name).(DefaultNodeInfo) ni.Channels = channels tc.malleateNodeInfo(&ni) err := ni.Validate() @@ -90,15 +88,15 @@ func TestNodeInfoValidate(t *testing.T) { func TestNodeInfoCompatible(t *testing.T) { - nodeKey1 := NodeKey{PrivKey: ed25519.GenPrivKey()} - nodeKey2 := NodeKey{PrivKey: ed25519.GenPrivKey()} + nodeKey1 := GenNodeKey() + nodeKey2 := GenNodeKey() name := "testing" var newTestChannel byte = 0x2 // test NodeInfo is compatible - ni1 := testNodeInfo(nodeKey1.ID(), name).(DefaultNodeInfo) - ni2 := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) + ni1 := testNodeInfo(nodeKey1.ID, name).(DefaultNodeInfo) + ni2 := testNodeInfo(nodeKey2.ID, name).(DefaultNodeInfo) assert.NoError(t, ni1.CompatibleWith(ni2)) // add another channel; still compatible @@ -120,7 +118,7 @@ func TestNodeInfoCompatible(t *testing.T) { } for _, tc := range testCases { - ni := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) + ni := testNodeInfo(nodeKey2.ID, name).(DefaultNodeInfo) tc.malleateNodeInfo(&ni) assert.Error(t, ni1.CompatibleWith(ni)) } diff --git a/p2p/peer.go 
b/p2p/peer.go index 5b662d63ff..45996df394 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -1,8 +1,12 @@ package p2p import ( + "bytes" + "encoding/hex" "fmt" "net" + "strings" + "sync" "time" "github.com/lazyledger/lazyledger-core/libs/cmap" @@ -12,6 +16,133 @@ import ( tmconn "github.com/lazyledger/lazyledger-core/p2p/conn" ) +// PeerID is a unique peer ID, generally expressed in hex form. +type PeerID []byte + +// String implements the fmt.Stringer interface for the PeerID type. +func (pid PeerID) String() string { + return strings.ToLower(hex.EncodeToString(pid)) +} + +// Empty returns true if the PeerID is considered empty. +func (pid PeerID) Empty() bool { + return len(pid) == 0 +} + +// PeerIDFromString returns a PeerID from an encoded string or an error upon +// decode failure. +func PeerIDFromString(s string) (PeerID, error) { + bz, err := hex.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("failed to decode PeerID (%s): %w", s, err) + } + + return PeerID(bz), nil +} + +// Equal reports whether two PeerID are equal. +func (pid PeerID) Equal(other PeerID) bool { + return bytes.Equal(pid, other) +} + +// PeerStatus specifies peer statuses. +type PeerStatus string + +const ( + PeerStatusNew = PeerStatus("new") // New peer which we haven't tried to contact yet. + PeerStatusUp = PeerStatus("up") // Peer which we have an active connection to. + PeerStatusDown = PeerStatus("down") // Peer which we're temporarily disconnected from. + PeerStatusRemoved = PeerStatus("removed") // Peer which has been removed. + PeerStatusBanned = PeerStatus("banned") // Peer which is banned for misbehavior. +) + +// PeerPriority specifies peer priorities. +type PeerPriority int + +const ( + PeerPriorityNormal PeerPriority = iota + 1 + PeerPriorityValidator + PeerPriorityPersistent +) + +// PeerError is a peer error reported by a reactor via the Error channel. The +// severity may cause the peer to be disconnected or banned depending on policy. 
+type PeerError struct { + PeerID PeerID + Err error + Severity PeerErrorSeverity +} + +// PeerErrorSeverity determines the severity of a peer error. +type PeerErrorSeverity string + +const ( + PeerErrorSeverityLow PeerErrorSeverity = "low" // Mostly ignored. + PeerErrorSeverityHigh PeerErrorSeverity = "high" // May disconnect. + PeerErrorSeverityCritical PeerErrorSeverity = "critical" // Ban. +) + +// PeerUpdatesCh defines a wrapper around a PeerUpdate go channel that allows +// a reactor to listen for peer updates and safely close it when stopping. +type PeerUpdatesCh struct { + closeOnce sync.Once + + // updatesCh defines the go channel in which the router sends peer updates to + // reactors. Each reactor will have its own PeerUpdatesCh to listen for updates + // from. + updatesCh chan PeerUpdate + + // doneCh is used to signal that a PeerUpdatesCh is closed. It is the + // reactor's responsibility to invoke Close. + doneCh chan struct{} +} + +// NewPeerUpdates returns a reference to a new PeerUpdatesCh. +func NewPeerUpdates() *PeerUpdatesCh { + return &PeerUpdatesCh{ + updatesCh: make(chan PeerUpdate), + doneCh: make(chan struct{}), + } +} + +// Updates returns a read-only go channel where a consuming reactor can listen +// for peer updates sent from the router. +func (puc *PeerUpdatesCh) Updates() <-chan PeerUpdate { + return puc.updatesCh +} + +// Close closes the PeerUpdatesCh channel. It should only be closed by the respective +// reactor when stopping and ensure nothing is listening for updates. +// +// NOTE: After a PeerUpdatesCh is closed, the router may safely assume it can no +// longer send on the internal updatesCh, however it should NEVER explicitly close +// it as that could result in panics by sending on a closed channel. 
+func (puc *PeerUpdatesCh) Close() { + puc.closeOnce.Do(func() { + close(puc.doneCh) + }) +} + +// Done returns a read-only version of the PeerUpdatesCh's internal doneCh go +// channel that should be used by a router to signal when it is safe to explicitly +// not send any peer updates. +func (puc *PeerUpdatesCh) Done() <-chan struct{} { + return puc.doneCh +} + +// PeerUpdate is a peer status update for reactors. +type PeerUpdate struct { + PeerID PeerID + Status PeerStatus +} + +// ============================================================================ +// Types and business logic below may be deprecated. +// +// TODO: Rename once legacy p2p types are removed. +// ref: https://github.com/tendermint/tendermint/issues/5670 +// ============================================================================ + //go:generate mockery --case underscore --name Peer const metricsTickerDuration = 10 * time.Second diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 3ac061bd1a..457c2f4f92 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/lazyledger/lazyledger-core/crypto/ed25519" "github.com/lazyledger/lazyledger-core/libs/service" ) @@ -38,10 +37,10 @@ func newMockPeer(ip net.IP) *mockPeer { if ip == nil { ip = net.IP{127, 0, 0, 1} } - nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} + nodeKey := GenNodeKey() return &mockPeer{ ip: ip, - id: nodeKey.ID(), + id: nodeKey.ID, } } diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 71ae3336fb..36ff000443 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -19,6 +19,51 @@ import ( tmconn "github.com/lazyledger/lazyledger-core/p2p/conn" ) +func TestPeerIDFromString(t *testing.T) { + testCases := map[string]struct { + input string + expectedID PeerID + expectErr bool + }{ + "empty peer ID string": {"", PeerID{}, false}, + "invalid peer ID string": {"foo", nil, true}, + "valid peer ID 
string": {"ff", PeerID{0xFF}, false}, + } + + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + pID, err := PeerIDFromString(tc.input) + require.Equal(t, tc.expectErr, err != nil, err) + require.Equal(t, tc.expectedID, pID) + }) + } +} + +func TestPeerID_String(t *testing.T) { + require.Equal(t, "", PeerID{}.String()) + require.Equal(t, "ff", PeerID{0xFF}.String()) +} + +func TestPeerID_Equal(t *testing.T) { + testCases := map[string]struct { + idA PeerID + idB PeerID + equal bool + }{ + "empty IDs": {PeerID{}, PeerID{}, true}, + "not equal": {PeerID{0xFF}, PeerID{0xAA}, false}, + "equal": {PeerID{0xFF}, PeerID{0xFF}, true}, + } + + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + require.Equal(t, tc.equal, tc.idA.Equal(tc.idB)) + }) + } +} + func TestPeerBasic(t *testing.T) { assert, require := assert.New(t), require.New(t) diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index dd4f101a80..0c772039ac 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -22,8 +22,7 @@ import ( // FIXME These tests should not rely on .(*addrBook) assertions func TestAddrBookPickAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) + fname := createTempFileName(t, "addrbook_test") // 0 addresses book := NewAddrBook(fname, true) @@ -59,8 +58,7 @@ func TestAddrBookPickAddress(t *testing.T) { } func TestAddrBookSaveLoad(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) + fname := createTempFileName(t, "addrbook_test") // 0 addresses book := NewAddrBook(fname, true) @@ -94,9 +92,7 @@ func TestAddrBookSaveLoad(t *testing.T) { } func TestAddrBookLookup(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") randAddrs := randNetAddressPairs(t, 100) book := NewAddrBook(fname, true) @@ -113,9 +109,7 @@ func 
TestAddrBookLookup(t *testing.T) { } func TestAddrBookPromoteToOld(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") randAddrs := randNetAddressPairs(t, 100) book := NewAddrBook(fname, true) @@ -157,10 +151,9 @@ func TestAddrBookPromoteToOld(t *testing.T) { } func TestAddrBookHandlesDuplicates(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) randAddrs := randNetAddressPairs(t, 100) @@ -211,9 +204,7 @@ func randIPv4Address(t *testing.T) *p2p.NetAddress { } func TestAddrBookRemoveAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) @@ -232,9 +223,7 @@ func TestAddrBookRemoveAddress(t *testing.T) { func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) { // create a book with 10 addresses, 1 good/old and 9 new - book, fname := createAddrBookWithMOldAndNNewAddrs(t, 1, 9) - defer deleteTempFile(fname) - + book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9) addrs := book.GetSelectionWithBias(biasToSelectNewPeers) assert.NotNil(t, addrs) assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book) @@ -242,26 +231,20 @@ func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) { func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) { // create a book with 10 addresses, 9 good/old and 1 new - book, fname := createAddrBookWithMOldAndNNewAddrs(t, 9, 1) - defer deleteTempFile(fname) - + book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1) addrs := book.GetSelectionWithBias(biasToSelectNewPeers) assert.NotNil(t, addrs) assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book) } func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t 
*testing.T) { - book, fname := createAddrBookWithMOldAndNNewAddrs(t, 0, 0) - defer deleteTempFile(fname) - + book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0) addrs := book.GetSelectionWithBias(biasToSelectNewPeers) assert.Nil(t, addrs) } func TestAddrBookGetSelection(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) @@ -301,9 +284,7 @@ func TestAddrBookGetSelection(t *testing.T) { func TestAddrBookGetSelectionWithBias(t *testing.T) { const biasTowardsNewAddrs = 30 - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) @@ -384,9 +365,7 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) { } func TestAddrBookHasAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) addr := randIPv4Address(t) @@ -401,6 +380,7 @@ func TestAddrBookHasAddress(t *testing.T) { } func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) { + t.Helper() addrs := make([]*p2p.NetAddress, numAddrs) for i := 0; i < numAddrs; i++ { addrs[i] = randIPv4Address(t) @@ -414,9 +394,7 @@ func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []st } func TestBanBadPeers(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) @@ -441,9 +419,7 @@ func TestBanBadPeers(t *testing.T) { } func TestAddrBookEmpty(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := 
createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) // Check that empty book is empty @@ -463,9 +439,7 @@ func TestAddrBookEmpty(t *testing.T) { } func TestPrivatePeers(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - + fname := createTempFileName(t, "addrbook_test") book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) @@ -496,8 +470,7 @@ func testAddrBookAddressSelection(t *testing.T, bookSize int) { dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld) // create book and get selection - book, fname := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) - defer deleteTempFile(fname) + book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) addrs := book.GetSelectionWithBias(biasToSelectNewPeers) assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr) nAddrs := len(addrs) @@ -590,8 +563,7 @@ func TestMultipleAddrBookAddressSelection(t *testing.T) { } func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) + fname := createTempFileName(t, "addrbook_test") // This test creates adds a peer to the address book and marks it good // It then attempts to override the peer's IP, by adding a peer with the same ID @@ -717,28 +689,26 @@ func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetA assert.Equal(t, n, nNew, "new addresses") } -func createTempFileName(prefix string) string { +func createTempFileName(t *testing.T, prefix string) string { + t.Helper() f, err := ioutil.TempFile("", prefix) if err != nil { panic(err) } + fname := f.Name() - err = f.Close() - if err != nil { - panic(err) + if err := f.Close(); err != nil { + t.Fatal(err) } - return fname -} -func deleteTempFile(fname string) { - err := os.Remove(fname) - if err != nil { - panic(err) - } + t.Cleanup(func() { _ = 
os.Remove(fname) }) + + return fname } func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) { - fname = createTempFileName("addrbook_test") + t.Helper() + fname = createTempFileName(t, "addrbook_test") book = NewAddrBook(fname, true).(*addrBook) book.SetLogger(log.TestingLogger()) diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index a8c7cd5466..8a0bb40557 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -236,10 +236,12 @@ func (r *Reactor) logErrAddrBook(err error) { } // Receive implements Reactor by handling incoming PEX messages. +// XXX: do not call any methods that can block or incur heavy processing. +// https://github.com/tendermint/tendermint/issues/2888 func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { msg, err := decodeMsg(msgBytes) if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) r.Switch.StopPeerForError(src, err) return } diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 55b43a4ef8..50f1bb4865 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -31,16 +31,14 @@ func init() { } func TestPEXReactorBasic(t *testing.T) { - r, book := createReactor(&ReactorConfig{}) - defer teardownReactor(book) + r, _ := createReactor(t, &ReactorConfig{}) assert.NotNil(t, r) assert.NotEmpty(t, r.GetChannels()) } func TestPEXReactorAddRemovePeer(t *testing.T) { - r, book := createReactor(&ReactorConfig{}) - defer teardownReactor(book) + r, book := createReactor(t, &ReactorConfig{}) size := book.Size() peer := p2p.CreateRandomPeer(false) @@ -73,9 +71,7 @@ func TestPEXReactorRunning(t *testing.T) { switches := make([]*p2p.Switch, N) // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) + 
dir := tempDir(t) books := make([]AddrBook, N) logger := log.TestingLogger() @@ -123,9 +119,7 @@ func TestPEXReactorRunning(t *testing.T) { } func TestPEXReactorReceive(t *testing.T) { - r, book := createReactor(&ReactorConfig{}) - defer teardownReactor(book) - + r, book := createReactor(t, &ReactorConfig{}) peer := p2p.CreateRandomPeer(false) // we have to send a request to receive responses @@ -141,9 +135,7 @@ func TestPEXReactorReceive(t *testing.T) { } func TestPEXReactorRequestMessageAbuse(t *testing.T) { - r, book := createReactor(&ReactorConfig{}) - defer teardownReactor(book) - + r, book := createReactor(t, &ReactorConfig{}) sw := createSwitchAndAddReactors(r) sw.SetAddrBook(book) @@ -176,9 +168,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { } func TestPEXReactorAddrsMessageAbuse(t *testing.T) { - r, book := createReactor(&ReactorConfig{}) - defer teardownReactor(book) - + r, book := createReactor(t, &ReactorConfig{}) sw := createSwitchAndAddReactors(r) sw.SetAddrBook(book) @@ -208,9 +198,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { func TestCheckSeeds(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) + dir := tempDir(t) // 1. test creating peer with no seeds works peerSwitch := testCreateDefaultPeer(dir, 0) @@ -247,19 +235,17 @@ func TestCheckSeeds(t *testing.T) { func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) + dir := tempDir(t) // 1. create seed seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) require.Nil(t, seed.Start()) - defer seed.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(func() { _ = seed.Stop() }) // 2. create usual peer with only seed configured. 
peer := testCreatePeerWithSeed(dir, 1, seed) require.Nil(t, peer.Start()) - defer peer.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(func() { _ = peer.Stop() }) // 3. check that the peer connects to seed immediately assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) @@ -267,25 +253,23 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) + dir := tempDir(t) // 1. create peer peerSwitch := testCreateDefaultPeer(dir, 1) require.Nil(t, peerSwitch.Start()) - defer peerSwitch.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(func() { _ = peerSwitch.Stop() }) // 2. Create seed which knows about the peer peerAddr := peerSwitch.NetAddress() seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) require.Nil(t, seed.Start()) - defer seed.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(func() { _ = seed.Stop() }) // 3. create another peer with only seed configured. secondPeer := testCreatePeerWithSeed(dir, 3, seed) require.Nil(t, secondPeer.Start()) - defer secondPeer.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(func() { _ = secondPeer.Stop() }) // 4. 
check that the second peer connects to seed immediately assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) @@ -296,25 +280,21 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { func TestPEXReactorSeedMode(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) + dir := tempDir(t) pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} - pexR, book := createReactor(pexRConfig) - defer teardownReactor(book) - + pexR, book := createReactor(t, pexRConfig) sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) - err = sw.Start() - require.NoError(t, err) - defer sw.Stop() // nolint:errcheck // ignore for tests + require.NoError(t, sw.Start()) + t.Cleanup(func() { _ = sw.Stop() }) assert.Zero(t, sw.Peers().Size()) peerSwitch := testCreateDefaultPeer(dir, 1) require.NoError(t, peerSwitch.Start()) - defer peerSwitch.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(func() { _ = peerSwitch.Stop() }) // 1. 
Test crawlPeers dials the peer pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) @@ -335,28 +315,23 @@ func TestPEXReactorSeedMode(t *testing.T) { func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) + dir := tempDir(t) pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} - pexR, book := createReactor(pexRConfig) - defer teardownReactor(book) - + pexR, book := createReactor(t, pexRConfig) sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) - err = sw.Start() - require.NoError(t, err) - defer sw.Stop() // nolint:errcheck // ignore for tests + require.NoError(t, sw.Start()) + t.Cleanup(func() { _ = sw.Stop() }) assert.Zero(t, sw.Peers().Size()) peerSwitch := testCreateDefaultPeer(dir, 1) require.NoError(t, peerSwitch.Start()) - defer peerSwitch.Stop() // nolint:errcheck // ignore for tests + t.Cleanup(func() { _ = peerSwitch.Stop() }) - err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()}) - require.NoError(t, err) + require.NoError(t, sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})) // 1. Test crawlPeers dials the peer pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) @@ -373,22 +348,16 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) - - pexR, book := createReactor(&ReactorConfig{SeedMode: true}) - defer teardownReactor(book) - + pexR, book := createReactor(t, &ReactorConfig{SeedMode: true}) sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) // No need to start sw since crawlPeers is called manually here. 
peer := mock.NewPeer(nil) addr := peer.SocketAddr() - err = book.AddAddress(addr, addr) - require.NoError(t, err) + require.NoError(t, book.AddAddress(addr, addr)) assert.True(t, book.HasAddress(addr)) @@ -409,9 +378,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { switches := make([]*p2p.Switch, N) // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) + dir := tempDir(t) books := make([]AddrBook, N) logger := log.TestingLogger() @@ -447,8 +414,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { reactor := switches[0].Reactors()["pex"].(*Reactor) peerID := switches[1].NodeInfo().ID() - err = switches[1].DialPeerWithAddress(switches[0].NetAddress()) - assert.NoError(t, err) + assert.NoError(t, switches[1].DialPeerWithAddress(switches[0].NetAddress())) // sleep up to a second while waiting for the peer to send us a message. // this isn't perfect since it's possible the peer sends us a msg and we FlushStop @@ -479,9 +445,8 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { peer := p2p.CreateRandomPeer(false) - pexR, book := createReactor(&ReactorConfig{}) + pexR, book := createReactor(t, &ReactorConfig{}) book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) - defer teardownReactor(book) // we have to send a request to receive responses pexR.RequestAddrs(peer) @@ -496,10 +461,9 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { } func TestPEXReactorDialPeer(t *testing.T) { - pexR, book := createReactor(&ReactorConfig{}) - defer teardownReactor(book) - + pexR, book := createReactor(t, &ReactorConfig{}) sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) peer := mock.NewPeer(nil) @@ -644,13 +608,9 @@ func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { return testCreatePeerWithConfig(dir, id, conf) } -func createReactor(conf *ReactorConfig) (r 
*Reactor, book AddrBook) { +func createReactor(t *testing.T, conf *ReactorConfig) (r *Reactor, book AddrBook) { // directory to store address book - dir, err := ioutil.TempDir("", "pex_reactor") - if err != nil { - panic(err) - } - book = NewAddrBook(filepath.Join(dir, "addrbook.json"), true) + book = NewAddrBook(filepath.Join(tempDir(t), "addrbook.json"), true) book.SetLogger(log.TestingLogger()) r = NewReactor(book, conf) @@ -658,14 +618,6 @@ func createReactor(conf *ReactorConfig) (r *Reactor, book AddrBook) { return } -func teardownReactor(book AddrBook) { - // FIXME Shouldn't rely on .(*addrBook) assertion - err := os.RemoveAll(filepath.Dir(book.(*addrBook).FilePath())) - if err != nil { - panic(err) - } -} - func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) sw.SetLogger(log.TestingLogger()) @@ -677,7 +629,6 @@ func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { } func TestPexVectors(t *testing.T) { - addr := tmp2p.NetAddress{ ID: "1", IP: "127.0.0.1", @@ -701,3 +652,20 @@ func TestPexVectors(t *testing.T) { require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) } } + +// FIXME: This function is used in place of testing.TB.TempDir() +// as the latter seems to cause test cases to fail when it is +// unable to remove the temporary directory once the test case +// execution terminates. This seems to happen often with pex +// reactor test cases. 
+// +// References: +// https://github.com/tendermint/tendermint/pull/5733 +// https://github.com/tendermint/tendermint/issues/5732 +func tempDir(t *testing.T) string { + t.Helper() + dir, err := ioutil.TempDir("", "") + require.NoError(t, err) + t.Cleanup(func() { _ = os.RemoveAll(dir) }) + return dir +} diff --git a/p2p/shim.go b/p2p/shim.go new file mode 100644 index 0000000000..ed3be90b16 --- /dev/null +++ b/p2p/shim.go @@ -0,0 +1,342 @@ +package p2p + +import ( + "errors" + "sort" + + "github.com/gogo/protobuf/proto" +) + +// ============================================================================ +// TODO: Types and business logic below are temporary and will be removed once +// the legacy p2p stack is removed in favor of the new model. +// +// ref: https://github.com/tendermint/tendermint/issues/5670 +// ============================================================================ + +var _ Reactor = (*ReactorShim)(nil) + +type ( + messageValidator interface { + Validate() error + } + + // ReactorShim defines a generic shim wrapper around a BaseReactor. It is + // responsible for wiring up legacy p2p behavior to the new p2p semantics + // (e.g. proxying Envelope messages to legacy peers). + ReactorShim struct { + BaseReactor + + Name string + PeerUpdates *PeerUpdatesCh + Channels map[ChannelID]*ChannelShim + } + + // ChannelShim defines a generic shim wrapper around a legacy p2p channel + // and the new p2p Channel. It also includes the raw bi-directional Go channels + // so we can proxy message delivery. + ChannelShim struct { + Descriptor *ChannelDescriptor + Channel *Channel + } + + // ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel + // and the proto.Message the new p2p Channel is responsible for handling. + // A ChannelDescriptorShim is not contained in ReactorShim, but is rather + // used to construct a ReactorShim. 
+ ChannelDescriptorShim struct { + MsgType proto.Message + Descriptor *ChannelDescriptor + } +) + +func NewReactorShim(name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim { + channels := make(map[ChannelID]*ChannelShim) + + for _, cds := range descriptors { + chShim := NewChannelShim(cds, 0) + channels[chShim.Channel.id] = chShim + } + + rs := &ReactorShim{ + Name: name, + PeerUpdates: NewPeerUpdates(), + Channels: channels, + } + + rs.BaseReactor = *NewBaseReactor(name, rs) + + return rs +} + +func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { + return &ChannelShim{ + Descriptor: cds.Descriptor, + Channel: NewChannel( + ChannelID(cds.Descriptor.ID), + cds.MsgType, + make(chan Envelope, buf), + make(chan Envelope, buf), + make(chan PeerError, buf), + ), + } +} + +// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate +// go-routine where we listen for outbound envelopes sent during Receive +// executions (or anything else that may send on the Channel) and proxy them to +// the corresponding Peer using the To field from the envelope. 
+func (rs *ReactorShim) proxyPeerEnvelopes() { + for _, cs := range rs.Channels { + go func(cs *ChannelShim) { + for e := range cs.Channel.outCh { + msg := proto.Clone(cs.Channel.messageType) + msg.Reset() + + wrapper, ok := msg.(Wrapper) + if ok { + if err := wrapper.Wrap(e.Message); err != nil { + rs.Logger.Error( + "failed to proxy envelope; failed to wrap message", + "ch_id", cs.Descriptor.ID, + "msg", e.Message, + "err", err, + ) + continue + } + } else { + msg = e.Message + } + + bz, err := proto.Marshal(msg) + if err != nil { + rs.Logger.Error( + "failed to proxy envelope; failed to encode message", + "ch_id", cs.Descriptor.ID, + "msg", e.Message, + "err", err, + ) + continue + } + + switch { + case e.Broadcast: + rs.Switch.Broadcast(cs.Descriptor.ID, bz) + + case !e.To.Empty(): + src := rs.Switch.peers.Get(ID(e.To.String())) + if src == nil { + rs.Logger.Error( + "failed to proxy envelope; failed to find peer", + "ch_id", cs.Descriptor.ID, + "msg", e.Message, + "peer", e.To.String(), + ) + continue + } + + if !src.Send(cs.Descriptor.ID, bz) { + rs.Logger.Error( + "failed to proxy message to peer", + "ch_id", cs.Descriptor.ID, + "msg", e.Message, + "peer", e.To.String(), + ) + } + + default: + rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID, "msg", e.Message) + } + } + }(cs) + } +} + +// handlePeerErrors iterates over each p2p Channel and starts a separate go-routine +// where we listen for peer errors. For each peer error, we find the peer from +// the legacy p2p Switch and execute a StopPeerForError call with the corresponding +// peer error. 
+func (rs *ReactorShim) handlePeerErrors() { + for _, cs := range rs.Channels { + go func(cs *ChannelShim) { + for pErr := range cs.Channel.errCh { + if !pErr.PeerID.Empty() { + peer := rs.Switch.peers.Get(ID(pErr.PeerID.String())) + if peer == nil { + rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.PeerID.String()) + continue + } + + rs.Switch.StopPeerForError(peer, pErr.Err) + } + } + }(cs) + } +} + +// OnStart executes the reactor shim's OnStart hook where we start all the +// necessary go-routines in order to proxy peer envelopes and errors per p2p +// Channel. +func (rs *ReactorShim) OnStart() error { + if rs.Switch == nil { + return errors.New("proxyPeerEnvelopes: reactor shim switch is nil") + } + + // start envelope proxying and peer error handling in separate go routines + rs.proxyPeerEnvelopes() + rs.handlePeerErrors() + + return nil +} + +// GetChannel returns a p2p Channel reference for a given ChannelID. If no +// Channel exists, nil is returned. +func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { + channelShim, ok := rs.Channels[cID] + if ok { + return channelShim.Channel + } + + return nil +} + +// GetChannels implements the legacy Reactor interface for getting a slice of all +// the supported ChannelDescriptors. +func (rs *ReactorShim) GetChannels() []*ChannelDescriptor { + sortedChIDs := make([]ChannelID, 0, len(rs.Channels)) + for cID := range rs.Channels { + sortedChIDs = append(sortedChIDs, cID) + } + + sort.Slice(sortedChIDs, func(i, j int) bool { return sortedChIDs[i] < sortedChIDs[j] }) + + descriptors := make([]*ChannelDescriptor, len(rs.Channels)) + for i, cID := range sortedChIDs { + descriptors[i] = rs.Channels[cID].Descriptor + } + + return descriptors +} + +// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh. +// The embedding reactor must be sure to listen for messages on this channel to +// handle adding a peer. 
+func (rs *ReactorShim) AddPeer(peer Peer) { + peerID, err := PeerIDFromString(string(peer.ID())) + if err != nil { + rs.Logger.Error("failed to add peer", "peer", peer.ID(), "err", err) + return + } + + select { + case rs.PeerUpdates.updatesCh <- PeerUpdate{PeerID: peerID, Status: PeerStatusUp}: + rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peerID.String(), "status", PeerStatusUp) + + case <-rs.PeerUpdates.Done(): + // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. + // This is because there may be numerous spawned goroutines that are + // attempting to send on the updateCh go channel and when the reactor stops + // we do not want to preemptively close the channel as that could result in + // panics sending on a closed channel. This also means that reactors MUST + // be certain there are NO listeners on the updateCh channel when closing or + // stopping. + } +} + +// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh. +// The embedding reactor must be sure to listen for messages on this channel to +// handle removing a peer. +func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) { + peerID, err := PeerIDFromString(string(peer.ID())) + if err != nil { + rs.Logger.Error("failed to remove peer", "peer", peer.ID(), "err", err) + return + } + + select { + case rs.PeerUpdates.updatesCh <- PeerUpdate{PeerID: peerID, Status: PeerStatusDown}: + rs.Logger.Debug( + "sent peer update", + "reactor", rs.Name, + "peer", peerID.String(), + "reason", reason, + "status", PeerStatusDown, + ) + + case <-rs.PeerUpdates.Done(): + // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. + // This is because there may be numerous spawned goroutines that are + // attempting to send on the updateCh go channel and when the reactor stops + // we do not want to preemptively close the channel as that could result in + // panics sending on a closed channel. 
This also means that reactors MUST + // be certain there are NO listeners on the updateCh channel when closing or + // stopping. + } +} + +// Receive implements a generic wrapper around implementing the Receive method +// on the legacy Reactor p2p interface. If the reactor is running, Receive will +// find the corresponding new p2p Channel, create and decode the appropriate +// proto.Message from the msgBytes, execute any validation and finally construct +// and send a p2p Envelope on the appropriate p2p Channel. +func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) { + if !rs.IsRunning() { + return + } + + cID := ChannelID(chID) + channelShim, ok := rs.Channels[cID] + if !ok { + rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID) + return + } + + peerID, err := PeerIDFromString(string(src.ID())) + if err != nil { + rs.Logger.Error("failed to convert peer ID", "peer", src, "ch_id", chID, "err", err) + return + } + + msg := proto.Clone(channelShim.Channel.messageType) + msg.Reset() + + if err := proto.Unmarshal(msgBytes, msg); err != nil { + rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "msg", msg, "err", err) + rs.Switch.StopPeerForError(src, err) + return + } + + validator, ok := msg.(messageValidator) + if ok { + if err := validator.Validate(); err != nil { + rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "msg", msg, "err", err) + rs.Switch.StopPeerForError(src, err) + return + } + } + + wrapper, ok := msg.(Wrapper) + if ok { + var err error + + msg, err = wrapper.Unwrap() + if err != nil { + rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "msg", msg, "err", err) + return + } + } + + select { + case channelShim.Channel.inCh <- Envelope{From: peerID, Message: msg}: + rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", peerID.String()) + + case <-channelShim.Channel.Done(): + // NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel. 
+ // This is because there may be numerous spawned goroutines that are + // attempting to send on the inbound channel and when the reactor stops we + // do not want to preemptively close the channel as that could result in + // panics sending on a closed channel. This also means that reactors MUST + // be certain there are NO listeners on the inbound channel when closing or + // stopping. + } +} diff --git a/p2p/shim_test.go b/p2p/shim_test.go new file mode 100644 index 0000000000..ce8b74e023 --- /dev/null +++ b/p2p/shim_test.go @@ -0,0 +1,208 @@ +package p2p_test + +import ( + "sync" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/lazyledger/lazyledger-core/config" + "github.com/lazyledger/lazyledger-core/p2p" + p2pmocks "github.com/lazyledger/lazyledger-core/p2p/mocks" + ssproto "github.com/lazyledger/lazyledger-core/proto/tendermint/statesync" +) + +var ( + channelID1 = byte(0x01) + channelID2 = byte(0x02) + + p2pCfg = config.DefaultP2PConfig() + + testChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ + p2p.ChannelID(channelID1): { + MsgType: new(ssproto.Message), + Descriptor: &p2p.ChannelDescriptor{ + ID: channelID1, + Priority: 3, + SendQueueCapacity: 10, + RecvMessageCapacity: int(4e6), + }, + }, + p2p.ChannelID(channelID2): { + MsgType: new(ssproto.Message), + Descriptor: &p2p.ChannelDescriptor{ + ID: channelID2, + Priority: 1, + SendQueueCapacity: 4, + RecvMessageCapacity: int(16e6), + }, + }, + } +) + +type reactorShimTestSuite struct { + shim *p2p.ReactorShim + sw *p2p.Switch +} + +func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite { + t.Helper() + + rts := &reactorShimTestSuite{ + shim: p2p.NewReactorShim("TestShim", testChannelShims), + } + + rts.sw = p2p.MakeSwitch(p2pCfg, 1, "testing", "123.123.123", func(_ int, sw *p2p.Switch) *p2p.Switch { + for _, peer := range peers { + 
p2p.AddPeerToSwitchPeerSet(sw, peer) + } + + sw.AddReactor(rts.shim.Name, rts.shim) + return sw + }) + + // start the reactor shim + require.NoError(t, rts.shim.Start()) + + t.Cleanup(func() { + require.NoError(t, rts.shim.Stop()) + + for _, chs := range rts.shim.Channels { + chs.Channel.Close() + } + }) + + return rts +} + +func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, p2p.PeerID) { + t.Helper() + + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID(id)) + + pID, err := p2p.PeerIDFromString(string(peer.ID())) + require.NoError(t, err) + + return peer, pID +} + +func TestReactorShim_GetChannel(t *testing.T) { + rts := setup(t, nil) + + p2pCh := rts.shim.GetChannel(p2p.ChannelID(channelID1)) + require.NotNil(t, p2pCh) + require.Equal(t, p2pCh.ID(), p2p.ChannelID(channelID1)) + + p2pCh = rts.shim.GetChannel(p2p.ChannelID(byte(0x03))) + require.Nil(t, p2pCh) +} + +func TestReactorShim_GetChannels(t *testing.T) { + rts := setup(t, nil) + + p2pChs := rts.shim.GetChannels() + require.Len(t, p2pChs, 2) + require.Equal(t, p2p.ChannelID(p2pChs[0].ID), p2p.ChannelID(channelID1)) + require.Equal(t, p2p.ChannelID(p2pChs[1].ID), p2p.ChannelID(channelID2)) +} + +func TestReactorShim_AddPeer(t *testing.T) { + peerA, peerIDA := simplePeer(t, "aa") + rts := setup(t, []p2p.Peer{peerA}) + + var wg sync.WaitGroup + wg.Add(1) + + var peerUpdate p2p.PeerUpdate + go func() { + peerUpdate = <-rts.shim.PeerUpdates.Updates() + wg.Done() + }() + + rts.shim.AddPeer(peerA) + wg.Wait() + + require.Equal(t, peerIDA, peerUpdate.PeerID) + require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status) +} + +func TestReactorShim_RemovePeer(t *testing.T) { + peerA, peerIDA := simplePeer(t, "aa") + rts := setup(t, []p2p.Peer{peerA}) + + var wg sync.WaitGroup + wg.Add(1) + + var peerUpdate p2p.PeerUpdate + go func() { + peerUpdate = <-rts.shim.PeerUpdates.Updates() + wg.Done() + }() + + rts.shim.RemovePeer(peerA, "test reason") + wg.Wait() + + require.Equal(t, peerIDA, peerUpdate.PeerID) + 
require.Equal(t, p2p.PeerStatusDown, peerUpdate.Status) +} + +func TestReactorShim_Receive(t *testing.T) { + peerA, peerIDA := simplePeer(t, "aa") + rts := setup(t, []p2p.Peer{peerA}) + + msg := &ssproto.Message{ + Sum: &ssproto.Message_ChunkRequest{ + ChunkRequest: &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + }, + } + + bz, err := proto.Marshal(msg) + require.NoError(t, err) + + var wg sync.WaitGroup + + var response *ssproto.Message + peerA.On("Send", channelID1, mock.Anything).Run(func(args mock.Arguments) { + m := &ssproto.Message{} + require.NoError(t, proto.Unmarshal(args[1].([]byte), m)) + + response = m + wg.Done() + }).Return(true) + + p2pCh := rts.shim.Channels[p2p.ChannelID(channelID1)] + + wg.Add(2) + + // Simulate receiving the envelope in some real reactor and replying back with + // the same envelope and then closing the Channel. + go func() { + e := <-p2pCh.Channel.In() + require.Equal(t, peerIDA, e.From) + require.NotNil(t, e.Message) + + p2pCh.Channel.Out() <- p2p.Envelope{To: e.From, Message: e.Message} + p2pCh.Channel.Close() + wg.Done() + }() + + rts.shim.Receive(channelID1, peerA, bz) + + // wait until the mock peer called Send and we (fake) proxied the envelope + wg.Wait() + require.NotNil(t, response) + + m, err := response.Unwrap() + require.NoError(t, err) + require.Equal(t, msg.GetChunkRequest(), m) + + // Since p2pCh was closed in the simulated reactor above, calling Receive + // should not block. 
+ rts.shim.Receive(channelID1, peerA, bz) + require.Empty(t, p2pCh.Channel.In()) + + peerA.AssertExpectations(t) +} diff --git a/p2p/switch.go b/p2p/switch.go index 9c6891dea7..fa84925aff 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -76,7 +76,7 @@ type Switch struct { dialing *cmap.CMap reconnecting *cmap.CMap nodeInfo NodeInfo // our node info - nodeKey *NodeKey // our node privkey + nodeKey NodeKey // our node privkey addrBook AddrBook // peers addresses with whom we'll maintain constant connection persistentPeersAddrs []*NetAddress @@ -212,7 +212,7 @@ func (sw *Switch) NodeInfo() NodeInfo { // SetNodeKey sets the switch's private key for authenticated encryption. // NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey *NodeKey) { +func (sw *Switch) SetNodeKey(nodeKey NodeKey) { sw.nodeKey = nodeKey } @@ -322,6 +322,10 @@ func (sw *Switch) Peers() IPeerSet { // If the peer is persistent, it will attempt to reconnect. // TODO: make record depending on reason. func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { + if !peer.IsRunning() { + return + } + sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) sw.stopAndRemovePeer(peer, reason) diff --git a/p2p/test_util.go b/p2p/test_util.go index 8e4b399768..50f4710152 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -6,7 +6,6 @@ import ( "time" "github.com/lazyledger/lazyledger-core/crypto" - "github.com/lazyledger/lazyledger-core/crypto/ed25519" "github.com/lazyledger/lazyledger-core/libs/log" tmnet "github.com/lazyledger/lazyledger-core/libs/net" tmrand "github.com/lazyledger/lazyledger-core/libs/rand" @@ -181,12 +180,10 @@ func MakeSwitch( opts ...SwitchOption, ) *Switch { - nodeKey := NodeKey{ - PrivKey: ed25519.GenPrivKey(), - } - nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i)) + nodeKey := GenNodeKey() + nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i)) addr, err := NewNetAddressString( - 
IDAddressString(nodeKey.ID(), nodeInfo.(DefaultNodeInfo).ListenAddr), + IDAddressString(nodeKey.ID, nodeInfo.(DefaultNodeInfo).ListenAddr), ) if err != nil { panic(err) @@ -201,7 +198,7 @@ func MakeSwitch( // TODO: let the config be passed in? sw := initSwitch(i, NewSwitch(cfg, t, opts...)) sw.SetLogger(log.TestingLogger().With("switch", i)) - sw.SetNodeKey(&nodeKey) + sw.SetNodeKey(nodeKey) ni := nodeInfo.(DefaultNodeInfo) for ch := range sw.reactorsByCh { @@ -234,12 +231,6 @@ func testPeerConn( ) (pc peerConn, err error) { conn := rawConn - // Fuzz connection - if cfg.TestFuzz { - // so we have time to do peer handshakes and get set up - conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) - } - // Encrypt connection conn, err = upgradeSecretConn(conn, cfg.HandshakeTimeout, ourNodePrivKey) if err != nil { diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 4e7a0f3d0c..e13c81534c 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -37,11 +37,9 @@ func newMultiplexTransport( func TestTransportMultiplexConnFilter(t *testing.T) { mt := newMultiplexTransport( emptyNodeInfo(), - NodeKey{ - PrivKey: ed25519.GenPrivKey(), - }, + GenNodeKey(), ) - id := mt.nodeKey.ID() + id := mt.nodeKey.ID MultiplexTransportConnFilters( func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, @@ -91,11 +89,9 @@ func TestTransportMultiplexConnFilter(t *testing.T) { func TestTransportMultiplexConnFilterTimeout(t *testing.T) { mt := newMultiplexTransport( emptyNodeInfo(), - NodeKey{ - PrivKey: ed25519.GenPrivKey(), - }, + GenNodeKey(), ) - id := mt.nodeKey.ID() + id := mt.nodeKey.ID MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt) MultiplexTransportConnFilters( @@ -145,6 +141,7 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) { id, "transport", ), NodeKey{ + ID: id, PrivKey: pv, }, ) @@ -161,7 +158,7 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) { t.Fatal(err) } - laddr := 
NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + laddr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) // Connect more peers than max for i := 0; i <= maxIncomingConns; i++ { @@ -188,7 +185,7 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) { func TestTransportMultiplexAcceptMultiple(t *testing.T) { mt := testSetupMultiplexTransport(t) - laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + laddr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) var ( seed = rand.New(rand.NewSource(time.Now().UnixNano())) @@ -243,9 +240,11 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) { func testDialer(dialAddr NetAddress, errc chan error) { var ( pv = ed25519.GenPrivKey() + id = PubKeyToID(pv.PubKey()) dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), + testNodeInfo(id, defaultNodeName), NodeKey{ + ID: id, PrivKey: pv, }, ) @@ -275,7 +274,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { // Simulate slow Peer. 
go func() { - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + addr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) c, err := addr.Dial() if err != nil { @@ -323,11 +322,12 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { dialer = newMultiplexTransport( fastNodeInfo, NodeKey{ + ID: PubKeyToID(fastNodePV.PubKey()), PrivKey: fastNodePV, }, ) ) - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + addr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { @@ -362,15 +362,17 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) { go func() { var ( pv = ed25519.GenPrivKey() + id = PubKeyToID(pv.PubKey()) dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty + testNodeInfo(id, ""), // Should not be empty NodeKey{ + ID: id, PrivKey: pv, }, ) ) - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + addr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { @@ -405,11 +407,9 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) { testNodeInfo( PubKeyToID(ed25519.GenPrivKey().PubKey()), "dialer", ), - NodeKey{ - PrivKey: ed25519.GenPrivKey(), - }, + GenNodeKey(), ) - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + addr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { @@ -439,9 +439,11 @@ func TestTransportMultiplexDialRejectWrongID(t *testing.T) { var ( pv = ed25519.GenPrivKey() + id = PubKeyToID(pv.PubKey()) dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty + testNodeInfo(id, ""), // Should not be empty NodeKey{ + ID: id, PrivKey: pv, }, ) @@ -471,14 +473,16 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) { go func() { var ( pv = ed25519.GenPrivKey() + id = PubKeyToID(pv.PubKey()) dialer = newMultiplexTransport( - 
testNodeInfoWithNetwork(PubKeyToID(pv.PubKey()), "dialer", "incompatible-network"), + testNodeInfoWithNetwork(id, "dialer", "incompatible-network"), NodeKey{ + ID: id, PrivKey: pv, }, ) ) - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + addr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { @@ -505,7 +509,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { errc := make(chan error) go func() { - addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) + addr := NewNetAddress(mt.nodeKey.ID, mt.listener.Addr()) _, err := mt.Dial(*addr, peerConfig{}) if err != nil { @@ -631,6 +635,7 @@ func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { id, "transport", ), NodeKey{ + ID: id, PrivKey: pv, }, ) diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index 07f3b684c9..c3a8d322d5 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -5,8 +5,6 @@ package trust import ( "fmt" - "io/ioutil" - "os" "testing" "github.com/stretchr/testify/assert" @@ -17,9 +15,7 @@ import ( ) func TestTrustMetricStoreSaveLoad(t *testing.T) { - dir, err := ioutil.TempDir("", "trust_test") - require.NoError(t, err) - defer os.Remove(dir) + dir := t.TempDir() historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir) require.NoError(t, err) diff --git a/privval/file.go b/privval/file.go index 2651452ba8..d0bfda4528 100644 --- a/privval/file.go +++ b/privval/file.go @@ -11,6 +11,7 @@ import ( "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/ed25519" + "github.com/lazyledger/lazyledger-core/crypto/secp256k1" tmbytes "github.com/lazyledger/lazyledger-core/libs/bytes" tmjson "github.com/lazyledger/lazyledger-core/libs/json" tmos "github.com/lazyledger/lazyledger-core/libs/os" @@ -170,8 +171,15 @@ func NewFilePV(privKey crypto.PrivKey, keyFilePath, stateFilePath string) *FileP // 
GenFilePV generates a new validator with randomly generated private key // and sets the filePaths, but does not call Save(). -func GenFilePV(keyFilePath, stateFilePath string) *FilePV { - return NewFilePV(ed25519.GenPrivKey(), keyFilePath, stateFilePath) +func GenFilePV(keyFilePath, stateFilePath, keyType string) (*FilePV, error) { + switch keyType { + case types.ABCIPubKeyTypeSecp256k1: + return NewFilePV(secp256k1.GenPrivKey(), keyFilePath, stateFilePath), nil + case "", types.ABCIPubKeyTypeEd25519: + return NewFilePV(ed25519.GenPrivKey(), keyFilePath, stateFilePath), nil + default: + return nil, fmt.Errorf("key type: %s is not supported", keyType) + } } // LoadFilePV loads a FilePV from the filePaths. The FilePV handles double @@ -227,15 +235,18 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { // LoadOrGenFilePV loads a FilePV from the given filePaths // or else generates a new one and saves it to the filePaths. -func LoadOrGenFilePV(keyFilePath, stateFilePath string) *FilePV { - var pv *FilePV +func LoadOrGenFilePV(keyFilePath, stateFilePath string) (*FilePV, error) { + var ( + pv *FilePV + err error + ) if tmos.FileExists(keyFilePath) { pv = LoadFilePV(keyFilePath, stateFilePath) } else { - pv = GenFilePV(keyFilePath, stateFilePath) + pv, err = GenFilePV(keyFilePath, stateFilePath, "") pv.Save() } - return pv + return pv, err } // GetAddress returns the address of the validator. 
diff --git a/privval/file_test.go b/privval/file_test.go index 01876c5e2b..6e76387316 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -28,7 +28,8 @@ func TestGenLoadValidator(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) height := int64(100) privVal.LastSignState.Height = height @@ -46,7 +47,8 @@ func TestResetValidator(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) emptyState := FilePVLastSignState{filePath: tempStateFile.Name()} // new priv val has empty state @@ -86,9 +88,11 @@ func TestLoadOrGenValidator(t *testing.T) { t.Error(err) } - privVal := LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + privVal, err := LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + require.NoError(t, err) addr := privVal.GetAddress() - privVal = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + privVal, err = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + require.NoError(t, err) assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") } @@ -164,7 +168,8 @@ func TestSignVote(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) randbytes := tmrand.Bytes(tmhash.Size) randbytes2 := tmrand.Bytes(tmhash.Size) @@ -217,7 +222,8 @@ func TestSignProposal(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := 
GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) randbytes := tmrand.Bytes(tmhash.Size) randbytes2 := tmrand.Bytes(tmhash.Size) @@ -265,7 +271,8 @@ func TestDifferByTimestamp(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) randbytes := tmrand.Bytes(tmhash.Size) block1 := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} height, round := int64(10), int32(1) diff --git a/privval/msgs_test.go b/privval/msgs_test.go index 411ecdfdd0..b678b88590 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -12,6 +12,7 @@ import ( "github.com/lazyledger/lazyledger-core/crypto/ed25519" cryptoenc "github.com/lazyledger/lazyledger-core/crypto/encoding" "github.com/lazyledger/lazyledger-core/crypto/tmhash" + cryptoproto "github.com/lazyledger/lazyledger-core/proto/tendermint/crypto" privproto "github.com/lazyledger/lazyledger-core/proto/tendermint/privval" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" "github.com/lazyledger/lazyledger-core/types" @@ -81,14 +82,14 @@ func TestPrivvalVectors(t *testing.T) { {"ping request", &privproto.PingRequest{}, "3a00"}, {"ping response", &privproto.PingResponse{}, "4200"}, {"pubKey request", &privproto.PubKeyRequest{}, "0a00"}, - {"pubKey response", &privproto.PubKeyResponse{PubKey: &ppk, Error: nil}, "12240a220a20556a436f1218d30942efe798420f51dc9b6a311b929c578257457d05c5fcf230"}, - {"pubKey response with error", &privproto.PubKeyResponse{PubKey: nil, Error: remoteError}, "121212100801120c697427732061206572726f72"}, + {"pubKey response", &privproto.PubKeyResponse{PubKey: ppk, Error: nil}, 
"12240a220a20556a436f1218d30942efe798420f51dc9b6a311b929c578257457d05c5fcf230"}, + {"pubKey response with error", &privproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: remoteError}, "12140a0012100801120c697427732061206572726f72"}, {"Vote Request", &privproto.SignVoteRequest{Vote: votepb}, "1a760a74080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, - {"Vote Response", &privproto.SignedVoteResponse{Vote: votepb, Error: nil}, "22760a74080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, - {"Vote Response with error", &privproto.SignedVoteResponse{Vote: nil, Error: remoteError}, "221212100801120c697427732061206572726f72"}, + {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "22760a74080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, + {"Vote Response with error", &privproto.SignedVoteResponse{Vote: tmproto.Vote{}, Error: remoteError}, "22250a11220212002a0b088092b8c398feffffff0112100801120c697427732061206572726f72"}, {"Proposal Request", &privproto.SignProposalRequest{Proposal: proposalpb}, "2a700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, - {"Proposal Response", &privproto.SignedProposalResponse{Proposal: proposalpb, Error: nil}, 
"32700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, - {"Proposal Response with error", &privproto.SignedProposalResponse{Proposal: nil, Error: remoteError}, "321212100801120c697427732061206572726f72"}, + {"Proposal Response", &privproto.SignedProposalResponse{Proposal: *proposalpb, Error: nil}, "32700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, + {"Proposal Response with error", &privproto.SignedProposalResponse{Proposal: tmproto.Proposal{}, Error: remoteError}, "32250a112a021200320b088092b8c398feffffff0112100801120c697427732061206572726f72"}, } for _, tc := range testCases { diff --git a/privval/secret_connection.go b/privval/secret_connection.go new file mode 100644 index 0000000000..f95268494e --- /dev/null +++ b/privval/secret_connection.go @@ -0,0 +1,469 @@ +package privval + +import ( + "bytes" + "crypto/cipher" + crand "crypto/rand" + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "net" + "time" + + gogotypes "github.com/gogo/protobuf/types" + "github.com/gtank/merlin" + pool "github.com/libp2p/go-buffer-pool" + "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/hkdf" + "golang.org/x/crypto/nacl/box" + + "github.com/lazyledger/lazyledger-core/crypto" + "github.com/lazyledger/lazyledger-core/crypto/ed25519" + cryptoenc "github.com/lazyledger/lazyledger-core/crypto/encoding" + "github.com/lazyledger/lazyledger-core/libs/async" + "github.com/lazyledger/lazyledger-core/libs/protoio" + tmsync "github.com/lazyledger/lazyledger-core/libs/sync" + tmprivval 
"github.com/lazyledger/lazyledger-core/proto/tendermint/privval" +) + +// This code has been duplicated from p2p/conn prior to the P2P refactor. +// It is left here temporarily until we migrate privval to gRPC. +// https://github.com/tendermint/tendermint/issues/4698 + +// 4 + 1024 == 1028 total frame size +const ( + dataLenSize = 4 + dataMaxSize = 1024 + totalFrameSize = dataMaxSize + dataLenSize + aeadSizeOverhead = 16 // overhead of poly 1305 authentication tag + aeadKeySize = chacha20poly1305.KeySize + aeadNonceSize = chacha20poly1305.NonceSize +) + +var ( + ErrSmallOrderRemotePubKey = errors.New("detected low order point from remote peer") + + labelEphemeralLowerPublicKey = []byte("EPHEMERAL_LOWER_PUBLIC_KEY") + labelEphemeralUpperPublicKey = []byte("EPHEMERAL_UPPER_PUBLIC_KEY") + labelDHSecret = []byte("DH_SECRET") + labelSecretConnectionMac = []byte("SECRET_CONNECTION_MAC") + + secretConnKeyAndChallengeGen = []byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN") +) + +// SecretConnection implements net.Conn. +// It is an implementation of the STS protocol. +// See https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf for +// details on the protocol. +// +// Consumers of the SecretConnection are responsible for authenticating +// the remote peer's pubkey against known information, like a nodeID. +// Otherwise they are vulnerable to MITM. +// (TODO(ismail): see also https://github.com/tendermint/tendermint/issues/3010) +type SecretConnection struct { + + // immutable + recvAead cipher.AEAD + sendAead cipher.AEAD + + remPubKey crypto.PubKey + conn io.ReadWriteCloser + + // net.Conn must be thread safe: + // https://golang.org/pkg/net/#Conn. + // Since we have internal mutable state, + // we need mtxs. But recv and send states + // are independent, so we can use two mtxs. + // All .Read are covered by recvMtx, + // all .Write are covered by sendMtx. 
+ recvMtx tmsync.Mutex + recvBuffer []byte + recvNonce *[aeadNonceSize]byte + + sendMtx tmsync.Mutex + sendNonce *[aeadNonceSize]byte +} + +// MakeSecretConnection performs handshake and returns a new authenticated +// SecretConnection. +// Returns nil if there is an error in handshake. +// Caller should call conn.Close() +// See docs/sts-final.pdf for more information. +func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) { + var ( + locPubKey = locPrivKey.PubKey() + ) + + // Generate ephemeral keys for perfect forward secrecy. + locEphPub, locEphPriv := genEphKeys() + + // Write local ephemeral pubkey and receive one too. + // NOTE: every 32-byte string is accepted as a Curve25519 public key (see + // DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf) + remEphPub, err := shareEphPubKey(conn, locEphPub) + if err != nil { + return nil, err + } + + // Sort by lexical order. + loEphPub, hiEphPub := sort32(locEphPub, remEphPub) + + transcript := merlin.NewTranscript("TENDERMINT_SECRET_CONNECTION_TRANSCRIPT_HASH") + + transcript.AppendMessage(labelEphemeralLowerPublicKey, loEphPub[:]) + transcript.AppendMessage(labelEphemeralUpperPublicKey, hiEphPub[:]) + + // Check if the local ephemeral public key was the least, lexicographically + // sorted. + locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:]) + + // Compute common diffie hellman secret using X25519. + dhSecret, err := computeDHSecret(remEphPub, locEphPriv) + if err != nil { + return nil, err + } + + transcript.AppendMessage(labelDHSecret, dhSecret[:]) + + // Generate the secret used for receiving, sending, challenge via HKDF-SHA2 + // on the transcript state (which itself also uses HKDF-SHA2 to derive a key + // from the dhSecret). 
+ recvSecret, sendSecret := deriveSecrets(dhSecret, locIsLeast) + + const challengeSize = 32 + var challenge [challengeSize]byte + challengeSlice := transcript.ExtractBytes(labelSecretConnectionMac, challengeSize) + + copy(challenge[:], challengeSlice[0:challengeSize]) + + sendAead, err := chacha20poly1305.New(sendSecret[:]) + if err != nil { + return nil, errors.New("invalid send SecretConnection Key") + } + recvAead, err := chacha20poly1305.New(recvSecret[:]) + if err != nil { + return nil, errors.New("invalid receive SecretConnection Key") + } + + sc := &SecretConnection{ + conn: conn, + recvBuffer: nil, + recvNonce: new([aeadNonceSize]byte), + sendNonce: new([aeadNonceSize]byte), + recvAead: recvAead, + sendAead: sendAead, + } + + // Sign the challenge bytes for authentication. + locSignature, err := signChallenge(&challenge, locPrivKey) + if err != nil { + return nil, err + } + + // Share (in secret) each other's pubkey & challenge signature + authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature) + if err != nil { + return nil, err + } + + remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig + if _, ok := remPubKey.(ed25519.PubKey); !ok { + return nil, fmt.Errorf("expected ed25519 pubkey, got %T", remPubKey) + } + if !remPubKey.VerifySignature(challenge[:], remSignature) { + return nil, errors.New("challenge verification failed") + } + + // We've authorized. + sc.remPubKey = remPubKey + return sc, nil +} + +// RemotePubKey returns authenticated remote pubkey +func (sc *SecretConnection) RemotePubKey() crypto.PubKey { + return sc.remPubKey +} + +// Writes encrypted frames of `totalFrameSize + aeadSizeOverhead`. +// CONTRACT: data smaller than dataMaxSize is written atomically. 
+func (sc *SecretConnection) Write(data []byte) (n int, err error) { + sc.sendMtx.Lock() + defer sc.sendMtx.Unlock() + + for 0 < len(data) { + if err := func() error { + var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) + var frame = pool.Get(totalFrameSize) + defer func() { + pool.Put(sealedFrame) + pool.Put(frame) + }() + var chunk []byte + if dataMaxSize < len(data) { + chunk = data[:dataMaxSize] + data = data[dataMaxSize:] + } else { + chunk = data + data = nil + } + chunkLength := len(chunk) + binary.LittleEndian.PutUint32(frame, uint32(chunkLength)) + copy(frame[dataLenSize:], chunk) + + // encrypt the frame + sc.sendAead.Seal(sealedFrame[:0], sc.sendNonce[:], frame, nil) + incrNonce(sc.sendNonce) + // end encryption + + _, err = sc.conn.Write(sealedFrame) + if err != nil { + return err + } + n += len(chunk) + return nil + }(); err != nil { + return n, err + } + } + return n, err +} + +// CONTRACT: data smaller than dataMaxSize is read atomically. +func (sc *SecretConnection) Read(data []byte) (n int, err error) { + sc.recvMtx.Lock() + defer sc.recvMtx.Unlock() + + // read off and update the recvBuffer, if non-empty + if 0 < len(sc.recvBuffer) { + n = copy(data, sc.recvBuffer) + sc.recvBuffer = sc.recvBuffer[n:] + return + } + + // read off the conn + var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) + defer pool.Put(sealedFrame) + _, err = io.ReadFull(sc.conn, sealedFrame) + if err != nil { + return + } + + // decrypt the frame. + // reads and updates the sc.recvNonce + var frame = pool.Get(totalFrameSize) + defer pool.Put(frame) + _, err = sc.recvAead.Open(frame[:0], sc.recvNonce[:], sealedFrame, nil) + if err != nil { + return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) + } + incrNonce(sc.recvNonce) + // end decryption + + // copy checkLength worth into data, + // set recvBuffer to the rest. 
+ var chunkLength = binary.LittleEndian.Uint32(frame) // read the first four bytes + if chunkLength > dataMaxSize { + return 0, errors.New("chunkLength is greater than dataMaxSize") + } + var chunk = frame[dataLenSize : dataLenSize+chunkLength] + n = copy(data, chunk) + if n < len(chunk) { + sc.recvBuffer = make([]byte, len(chunk)-n) + copy(sc.recvBuffer, chunk[n:]) + } + return n, err +} + +// Implements net.Conn +// nolint +func (sc *SecretConnection) Close() error { return sc.conn.Close() } +func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() } +func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() } +func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) } +func (sc *SecretConnection) SetReadDeadline(t time.Time) error { + return sc.conn.(net.Conn).SetReadDeadline(t) +} +func (sc *SecretConnection) SetWriteDeadline(t time.Time) error { + return sc.conn.(net.Conn).SetWriteDeadline(t) +} + +func genEphKeys() (ephPub, ephPriv *[32]byte) { + var err error + // TODO: Probably not a problem but ask Tony: different from the rust implementation (uses x25519-dalek), + // we do not "clamp" the private key scalar: + // see: https://github.com/dalek-cryptography/x25519-dalek/blob/34676d336049df2bba763cc076a75e47ae1f170f/src/x25519.rs#L56-L74 + ephPub, ephPriv, err = box.GenerateKey(crand.Reader) + if err != nil { + panic("Could not generate ephemeral key-pair") + } + return +} + +func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byte, err error) { + + // Send our pubkey and receive theirs in tandem. 
+ var trs, _ = async.Parallel( + func(_ int) (val interface{}, abort bool, err error) { + lc := *locEphPub + _, err = protoio.NewDelimitedWriter(conn).WriteMsg(&gogotypes.BytesValue{Value: lc[:]}) + if err != nil { + return nil, true, err // abort + } + return nil, false, nil + }, + func(_ int) (val interface{}, abort bool, err error) { + var bytes gogotypes.BytesValue + err = protoio.NewDelimitedReader(conn, 1024*1024).ReadMsg(&bytes) + if err != nil { + return nil, true, err // abort + } + + var _remEphPub [32]byte + copy(_remEphPub[:], bytes.Value) + return _remEphPub, false, nil + }, + ) + + // If error: + if trs.FirstError() != nil { + err = trs.FirstError() + return + } + + // Otherwise: + var _remEphPub = trs.FirstValue().([32]byte) + return &_remEphPub, nil +} + +func deriveSecrets( + dhSecret *[32]byte, + locIsLeast bool, +) (recvSecret, sendSecret *[aeadKeySize]byte) { + hash := sha256.New + hkdf := hkdf.New(hash, dhSecret[:], nil, secretConnKeyAndChallengeGen) + // get enough data for 2 aead keys, and a 32 byte challenge + res := new([2*aeadKeySize + 32]byte) + _, err := io.ReadFull(hkdf, res[:]) + if err != nil { + panic(err) + } + + recvSecret = new([aeadKeySize]byte) + sendSecret = new([aeadKeySize]byte) + + // bytes 0 through aeadKeySize - 1 are one aead key. + // bytes aeadKeySize through 2*aeadKeySize -1 are another aead key. + // which key corresponds to sending and receiving key depends on whether + // the local key is less than the remote key. + if locIsLeast { + copy(recvSecret[:], res[0:aeadKeySize]) + copy(sendSecret[:], res[aeadKeySize:aeadKeySize*2]) + } else { + copy(sendSecret[:], res[0:aeadKeySize]) + copy(recvSecret[:], res[aeadKeySize:aeadKeySize*2]) + } + + return +} + +// computeDHSecret computes a Diffie-Hellman shared secret key +// from our own local private key and the other's public key. 
+func computeDHSecret(remPubKey, locPrivKey *[32]byte) (*[32]byte, error) { + shrKey, err := curve25519.X25519(locPrivKey[:], remPubKey[:]) + if err != nil { + return nil, err + } + var shrKeyArray [32]byte + copy(shrKeyArray[:], shrKey) + return &shrKeyArray, nil +} + +func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) { + if bytes.Compare(foo[:], bar[:]) < 0 { + lo = foo + hi = bar + } else { + lo = bar + hi = foo + } + return +} + +func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) ([]byte, error) { + signature, err := locPrivKey.Sign(challenge[:]) + if err != nil { + return nil, err + } + return signature, nil +} + +type authSigMessage struct { + Key crypto.PubKey + Sig []byte +} + +func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte) (recvMsg authSigMessage, err error) { + + // Send our info and receive theirs in tandem. + var trs, _ = async.Parallel( + func(_ int) (val interface{}, abort bool, err error) { + pbpk, err := cryptoenc.PubKeyToProto(pubKey) + if err != nil { + return nil, true, err + } + _, err = protoio.NewDelimitedWriter(sc).WriteMsg(&tmprivval.AuthSigMessage{PubKey: pbpk, Sig: signature}) + if err != nil { + return nil, true, err // abort + } + return nil, false, nil + }, + func(_ int) (val interface{}, abort bool, err error) { + var pba tmprivval.AuthSigMessage + err = protoio.NewDelimitedReader(sc, 1024*1024).ReadMsg(&pba) + if err != nil { + return nil, true, err // abort + } + + pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + if err != nil { + return nil, true, err // abort + } + + _recvMsg := authSigMessage{ + Key: pk, + Sig: pba.Sig, + } + return _recvMsg, false, nil + }, + ) + + // If error: + if trs.FirstError() != nil { + err = trs.FirstError() + return + } + + var _recvMsg = trs.FirstValue().(authSigMessage) + return _recvMsg, nil +} + +//-------------------------------------------------------------------------------- + +// Increment nonce little-endian by 1 with wraparound. 
+// Due to chacha20poly1305 expecting a 12 byte nonce we do not use the first four +// bytes. We only increment a 64 bit unsigned int in the remaining 8 bytes +// (little-endian in nonce[4:]). +func incrNonce(nonce *[aeadNonceSize]byte) { + counter := binary.LittleEndian.Uint64(nonce[4:]) + if counter == math.MaxUint64 { + // Terminates the session and makes sure the nonce would not re-used. + // See https://github.com/tendermint/tendermint/issues/3531 + panic("can't increase nonce without overflow") + } + counter++ + binary.LittleEndian.PutUint64(nonce[4:], counter) +} diff --git a/privval/signer_client.go b/privval/signer_client.go index 093a14a784..8791b16b91 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -82,7 +82,7 @@ func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - pk, err := cryptoenc.PubKeyFromProto(*resp.PubKey) + pk, err := cryptoenc.PubKeyFromProto(resp.PubKey) if err != nil { return nil, err } @@ -105,7 +105,7 @@ func (sc *SignerClient) SignVote(chainID string, vote *tmproto.Vote) error { return &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - *vote = *resp.Vote + *vote = resp.Vote return nil } @@ -127,7 +127,7 @@ func (sc *SignerClient) SignProposal(chainID string, proposal *tmproto.Proposal) return &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - *proposal = *resp.Proposal + *proposal = resp.Proposal return nil } diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index ca00e25206..f362c0b82c 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -11,6 +11,7 @@ import ( "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/tmhash" tmrand "github.com/lazyledger/lazyledger-core/libs/rand" + cryptoproto 
"github.com/lazyledger/lazyledger-core/proto/tendermint/crypto" privvalproto "github.com/lazyledger/lazyledger-core/proto/tendermint/privval" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" "github.com/lazyledger/lazyledger-core/types" @@ -396,11 +397,11 @@ func brokenHandler(privVal types.PrivValidator, request privvalproto.Message, switch r := request.Sum.(type) { // This is broken and will answer most requests with a pubkey response case *privvalproto.Message_PubKeyRequest: - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: nil, Error: nil}) + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) case *privvalproto.Message_SignVoteRequest: - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: nil, Error: nil}) + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) case *privvalproto.Message_SignProposalRequest: - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: nil, Error: nil}) + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) case *privvalproto.Message_PingRequest: err, res = nil, mustWrapMsg(&privvalproto.PingResponse{}) default: diff --git a/privval/signer_dialer_endpoint.go b/privval/signer_dialer_endpoint.go index 431c2b6ce1..7913cca7ed 100644 --- a/privval/signer_dialer_endpoint.go +++ b/privval/signer_dialer_endpoint.go @@ -15,24 +15,26 @@ const ( // SignerServiceEndpointOption sets an optional parameter on the SignerDialerEndpoint. type SignerServiceEndpointOption func(*SignerDialerEndpoint) -// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for connections -// from external signing processes. +// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for +// connections from client processes. 
func SignerDialerEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption { return func(ss *SignerDialerEndpoint) { ss.timeoutReadWrite = timeout } } -// SignerDialerEndpointConnRetries sets the amount of attempted retries to acceptNewConnection. +// SignerDialerEndpointConnRetries sets the amount of attempted retries to +// acceptNewConnection. func SignerDialerEndpointConnRetries(retries int) SignerServiceEndpointOption { return func(ss *SignerDialerEndpoint) { ss.maxConnRetries = retries } } -// SignerDialerEndpointRetryWaitInterval sets the retry wait interval to a custom value +// SignerDialerEndpointRetryWaitInterval sets the retry wait interval to a +// custom value. func SignerDialerEndpointRetryWaitInterval(interval time.Duration) SignerServiceEndpointOption { return func(ss *SignerDialerEndpoint) { ss.retryWait = interval } } -// SignerDialerEndpoint dials using its dialer and responds to any -// signature requests using its privVal. +// SignerDialerEndpoint dials using its dialer and responds to any signature +// requests using its privVal. 
type SignerDialerEndpoint struct { signerEndpoint @@ -57,13 +59,13 @@ func NewSignerDialerEndpoint( maxConnRetries: defaultMaxDialRetries, } + sd.BaseService = *service.NewBaseService(logger, "SignerDialerEndpoint", sd) + sd.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second + for _, optionFunc := range options { optionFunc(sd) } - sd.BaseService = *service.NewBaseService(logger, "SignerDialerEndpoint", sd) - sd.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second - return sd } diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go index ced23c511f..0cd900dea9 100644 --- a/privval/signer_endpoint.go +++ b/privval/signer_endpoint.go @@ -12,7 +12,7 @@ import ( ) const ( - defaultTimeoutReadWriteSeconds = 3 + defaultTimeoutReadWriteSeconds = 5 ) type signerEndpoint struct { diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index b07281d962..d81ab9f70b 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -11,11 +11,22 @@ import ( privvalproto "github.com/lazyledger/lazyledger-core/proto/tendermint/privval" ) -// SignerValidatorEndpointOption sets an optional parameter on the SocketVal. -type SignerValidatorEndpointOption func(*SignerListenerEndpoint) +// SignerListenerEndpointOption sets an optional parameter on the SignerListenerEndpoint. +type SignerListenerEndpointOption func(*SignerListenerEndpoint) + +// SignerListenerEndpointTimeoutReadWrite sets the read and write timeout for +// connections from external signing processes. 
+// +// Default: 5s +func SignerListenerEndpointTimeoutReadWrite(timeout time.Duration) SignerListenerEndpointOption { + return func(sl *SignerListenerEndpoint) { sl.signerEndpoint.timeoutReadWrite = timeout } +} -// SignerListenerEndpoint listens for an external process to dial in -// and keeps the connection alive by dropping and reconnecting +// SignerListenerEndpoint listens for an external process to dial in and keeps +// the connection alive by dropping and reconnecting. +// +// The process will send pings every ~3s (read/write timeout * 2/3) to keep the +// connection alive. type SignerListenerEndpoint struct { signerEndpoint @@ -25,6 +36,7 @@ type SignerListenerEndpoint struct { timeoutAccept time.Duration pingTimer *time.Ticker + pingInterval time.Duration instanceMtx tmsync.Mutex // Ensures instance public methods access, i.e. SendRequest } @@ -33,15 +45,21 @@ type SignerListenerEndpoint struct { func NewSignerListenerEndpoint( logger log.Logger, listener net.Listener, + options ...SignerListenerEndpointOption, ) *SignerListenerEndpoint { - sc := &SignerListenerEndpoint{ + sl := &SignerListenerEndpoint{ listener: listener, timeoutAccept: defaultTimeoutAcceptSeconds * time.Second, } - sc.BaseService = *service.NewBaseService(logger, "SignerListenerEndpoint", sc) - sc.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second - return sc + sl.BaseService = *service.NewBaseService(logger, "SignerListenerEndpoint", sl) + sl.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second + + for _, optionFunc := range options { + optionFunc(sl) + } + + return sl } // OnStart implements service.Service. 
@@ -49,7 +67,9 @@ func (sl *SignerListenerEndpoint) OnStart() error { sl.connectRequestCh = make(chan struct{}) sl.connectionAvailableCh = make(chan net.Conn) - sl.pingTimer = time.NewTicker(defaultPingPeriodMilliseconds * time.Millisecond) + // NOTE: ping timeout must be less than read/write timeout + sl.pingInterval = time.Duration(sl.signerEndpoint.timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond + sl.pingTimer = time.NewTicker(sl.pingInterval) go sl.serviceLoop() go sl.pingLoop() @@ -103,6 +123,9 @@ func (sl *SignerListenerEndpoint) SendRequest(request privvalproto.Message) (*pr return nil, err } + // Reset pingTimer to avoid sending unnecessary pings. + sl.pingTimer.Reset(sl.pingInterval) + return &res, nil } @@ -117,6 +140,7 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error } // block until connected or timeout + sl.Logger.Info("SignerListener: Blocking for connection") sl.triggerConnect() err := sl.WaitConnection(sl.connectionAvailableCh, maxWait) if err != nil { diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index 93fcd65da9..76c128e885 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -168,7 +168,11 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite listener = tcpLn } - return NewSignerListenerEndpoint(logger, listener) + return NewSignerListenerEndpoint( + logger, + listener, + SignerListenerEndpointTimeoutReadWrite(testTimeoutReadWrite), + ) } func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index 1a10818ac4..065e226c92 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -5,7 +5,9 @@ import ( "github.com/lazyledger/lazyledger-core/crypto" cryptoenc 
"github.com/lazyledger/lazyledger-core/crypto/encoding" + cryptoproto "github.com/lazyledger/lazyledger-core/proto/tendermint/crypto" privvalproto "github.com/lazyledger/lazyledger-core/proto/tendermint/privval" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" "github.com/lazyledger/lazyledger-core/types" ) @@ -22,14 +24,17 @@ func DefaultValidationRequestHandler( switch r := req.Sum.(type) { case *privvalproto.Message_PubKeyRequest: if r.PubKeyRequest.GetChainId() != chainID { - res = mustWrapMsg(&privvalproto.SignedVoteResponse{ - Vote: nil, Error: &privvalproto.RemoteSignerError{ + res = mustWrapMsg(&privvalproto.PubKeyResponse{ + PubKey: cryptoproto.PublicKey{}, Error: &privvalproto.RemoteSignerError{ Code: 0, Description: "unable to provide pubkey"}}) return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.PubKeyRequest.GetChainId(), chainID) } var pubKey crypto.PubKey pubKey, err = privVal.GetPubKey() + if err != nil { + return res, err + } pk, err := cryptoenc.PubKeyToProto(pubKey) if err != nil { return res, err @@ -37,15 +42,15 @@ func DefaultValidationRequestHandler( if err != nil { res = mustWrapMsg(&privvalproto.PubKeyResponse{ - PubKey: nil, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) + PubKey: cryptoproto.PublicKey{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) } else { - res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: &pk, Error: nil}) + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: pk, Error: nil}) } case *privvalproto.Message_SignVoteRequest: if r.SignVoteRequest.ChainId != chainID { res = mustWrapMsg(&privvalproto.SignedVoteResponse{ - Vote: nil, Error: &privvalproto.RemoteSignerError{ + Vote: tmproto.Vote{}, Error: &privvalproto.RemoteSignerError{ Code: 0, Description: "unable to sign vote"}}) return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.SignVoteRequest.GetChainId(), chainID) } @@ 
-55,15 +60,15 @@ func DefaultValidationRequestHandler( err = privVal.SignVote(chainID, vote) if err != nil { res = mustWrapMsg(&privvalproto.SignedVoteResponse{ - Vote: nil, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) + Vote: tmproto.Vote{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) } else { - res = mustWrapMsg(&privvalproto.SignedVoteResponse{Vote: vote, Error: nil}) + res = mustWrapMsg(&privvalproto.SignedVoteResponse{Vote: *vote, Error: nil}) } case *privvalproto.Message_SignProposalRequest: if r.SignProposalRequest.GetChainId() != chainID { - res = mustWrapMsg(&privvalproto.SignedVoteResponse{ - Vote: nil, Error: &privvalproto.RemoteSignerError{ + res = mustWrapMsg(&privvalproto.SignedProposalResponse{ + Proposal: tmproto.Proposal{}, Error: &privvalproto.RemoteSignerError{ Code: 0, Description: "unable to sign proposal"}}) return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.SignProposalRequest.GetChainId(), chainID) @@ -74,9 +79,9 @@ func DefaultValidationRequestHandler( err = privVal.SignProposal(chainID, proposal) if err != nil { res = mustWrapMsg(&privvalproto.SignedProposalResponse{ - Proposal: nil, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) + Proposal: tmproto.Proposal{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) } else { - res = mustWrapMsg(&privvalproto.SignedProposalResponse{Proposal: proposal, Error: nil}) + res = mustWrapMsg(&privvalproto.SignedProposalResponse{Proposal: *proposal, Error: nil}) } case *privvalproto.Message_PingRequest: err, res = nil, mustWrapMsg(&privvalproto.PingResponse{}) diff --git a/privval/socket_dialers.go b/privval/socket_dialers.go index b89920a833..146270d4bb 100644 --- a/privval/socket_dialers.go +++ b/privval/socket_dialers.go @@ -7,7 +7,6 @@ import ( "github.com/lazyledger/lazyledger-core/crypto" tmnet "github.com/lazyledger/lazyledger-core/libs/net" - p2pconn 
"github.com/lazyledger/lazyledger-core/p2p/conn" ) // Socket errors. @@ -28,7 +27,7 @@ func DialTCPFn(addr string, timeoutReadWrite time.Duration, privKey crypto.PrivK err = conn.SetDeadline(deadline) } if err == nil { - conn, err = p2pconn.MakeSecretConnection(conn, privKey) + conn, err = MakeSecretConnection(conn, privKey) } return conn, err } diff --git a/privval/socket_listeners.go b/privval/socket_listeners.go index 635a6a7952..0c3a59710e 100644 --- a/privval/socket_listeners.go +++ b/privval/socket_listeners.go @@ -5,12 +5,10 @@ import ( "time" "github.com/lazyledger/lazyledger-core/crypto/ed25519" - p2pconn "github.com/lazyledger/lazyledger-core/p2p/conn" ) const ( - defaultTimeoutAcceptSeconds = 3 - defaultPingPeriodMilliseconds = 100 + defaultTimeoutAcceptSeconds = 3 ) // timeoutError can be used to check if an error returned from the netp package @@ -77,7 +75,7 @@ func (ln *TCPListener) Accept() (net.Conn, error) { // Wrap the conn in our timeout and encryption wrappers timeoutConn := newTimeoutConn(tc, ln.timeoutReadWrite) - secretConn, err := p2pconn.MakeSecretConnection(timeoutConn, ln.secretConnKey) + secretConn, err := MakeSecretConnection(timeoutConn, ln.secretConnKey) if err != nil { return nil, err } diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 277aa75521..0d6ed76abd 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -49,6 +49,7 @@ message RequestInfo { string version = 1; uint64 block_version = 2; uint64 p2p_version = 3; + string abci_version = 4; } message RequestInitChain { @@ -161,6 +162,7 @@ message ResponseFlush {} message ResponseInfo { string data = 1; + // this is the software version of the application. TODO: remove? 
string version = 2; uint64 app_version = 3; diff --git a/proto/tendermint/evidence/types.pb.go b/proto/tendermint/evidence/types.pb.go deleted file mode 100644 index 64a6335119..0000000000 --- a/proto/tendermint/evidence/types.pb.go +++ /dev/null @@ -1,669 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/evidence/types.proto - -package evidence - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/lazyledger/lazyledger-core/proto/tendermint/types" - io "io" - math "math" - math_bits "math/bits" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type List struct { - Evidence []*types.Evidence `protobuf:"bytes,1,rep,name=evidence,proto3" json:"evidence,omitempty"` -} - -func (m *List) Reset() { *m = List{} } -func (m *List) String() string { return proto.CompactTextString(m) } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_5e804d1c041a0e47, []int{0} -} -func (m *List) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_List.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *List) XXX_Merge(src proto.Message) { - xxx_messageInfo_List.Merge(m, src) -} -func (m *List) XXX_Size() int { - return m.Size() -} -func (m *List) XXX_DiscardUnknown() { - xxx_messageInfo_List.DiscardUnknown(m) -} - -var xxx_messageInfo_List proto.InternalMessageInfo - -func (m *List) GetEvidence() []*types.Evidence { - if m != nil { - return m.Evidence - } - return nil -} - -type Info struct { - Evidence types.Evidence `protobuf:"bytes,1,opt,name=evidence,proto3" json:"evidence"` - Time time.Time `protobuf:"bytes,2,opt,name=time,proto3,stdtime" json:"time"` - Validators []*types.Validator `protobuf:"bytes,3,rep,name=validators,proto3" json:"validators,omitempty"` - TotalVotingPower int64 `protobuf:"varint,4,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_5e804d1c041a0e47, []int{1} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Info) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) -} -func (m *Info) XXX_Size() int { - return m.Size() -} -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) -} - -var xxx_messageInfo_Info proto.InternalMessageInfo - -func (m *Info) GetEvidence() types.Evidence { - if m != nil { - return m.Evidence - } - return types.Evidence{} -} - -func (m *Info) GetTime() time.Time { - if m != nil { - return m.Time - } - return time.Time{} -} - -func (m *Info) GetValidators() []*types.Validator { - if m != nil { - return m.Validators - } - return nil -} - -func (m *Info) GetTotalVotingPower() int64 { - if m != nil { - return m.TotalVotingPower - } - return 0 -} - -func init() { - proto.RegisterType((*List)(nil), "tendermint.evidence.List") - proto.RegisterType((*Info)(nil), "tendermint.evidence.Info") -} - -func init() { proto.RegisterFile("tendermint/evidence/types.proto", fileDescriptor_5e804d1c041a0e47) } - -var fileDescriptor_5e804d1c041a0e47 = []byte{ - // 340 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4a, 0xf3, 0x40, - 0x14, 0x85, 0x33, 0x7f, 0xc3, 0x4f, 0x19, 0x37, 0x32, 0xba, 0x08, 0x55, 0x92, 0xd2, 0x55, 0x17, - 0x9a, 0x01, 0x05, 0x11, 0x14, 0x85, 0x82, 0x0b, 0xc1, 0x85, 0x04, 0x29, 0xe8, 0xa6, 0xa4, 0xcd, - 0x74, 0x1c, 0x48, 0x72, 0x43, 0x72, 0x5b, 0xa9, 0x4f, 0xd1, 0xc7, 0xea, 0xb2, 0x4b, 0x57, 0x2a, - 0xed, 0xca, 0xb7, 0x90, 0x4c, 0x93, 0x36, 0x52, 0x71, 0x77, 0x93, 0xf3, 0x9d, 0x7b, 0xcf, 0x21, - 0xa1, 0x0e, 0x8a, 0x38, 0x10, 0x69, 0xa4, 0x62, 0xe4, 0x62, 0xac, 0x02, 0x11, 0x0f, 0x04, 0xc7, - 0x49, 0x22, 0x32, 0x37, 0x49, 0x01, 0x81, 0xed, 0x6d, 0x00, 0xb7, 0x04, 0x1a, 0xfb, 0x12, 
0x24, - 0x68, 0x9d, 0xe7, 0xd3, 0x0a, 0x6d, 0x38, 0x12, 0x40, 0x86, 0x82, 0xeb, 0xa7, 0xfe, 0x68, 0xc8, - 0x51, 0x45, 0x22, 0x43, 0x3f, 0x4a, 0x0a, 0xa0, 0x59, 0x39, 0xa6, 0x6f, 0xf0, 0xb1, 0x1f, 0xaa, - 0xc0, 0x47, 0x48, 0x0b, 0xe2, 0x70, 0x8b, 0xa8, 0x64, 0x69, 0x5d, 0x51, 0xf3, 0x4e, 0x65, 0xc8, - 0xce, 0x68, 0xbd, 0x8c, 0x62, 0x91, 0x66, 0xad, 0xbd, 0x73, 0xd2, 0x70, 0x2b, 0x31, 0x57, 0x96, - 0x9b, 0x82, 0xf0, 0xd6, 0x6c, 0xeb, 0x8b, 0x50, 0xf3, 0x36, 0x1e, 0x02, 0xbb, 0xfc, 0xb1, 0x80, - 0xfc, 0xbd, 0xa0, 0x63, 0xce, 0xde, 0x1d, 0x63, 0xb3, 0x86, 0x9d, 0x53, 0x33, 0x6f, 0x66, 0xfd, - 0x2b, 0x9c, 0xab, 0xda, 0x6e, 0x59, 0xdb, 0x7d, 0x28, 0x6b, 0x77, 0xea, 0xb9, 0x73, 0xfa, 0xe1, - 0x10, 0x4f, 0x3b, 0xd8, 0x05, 0xa5, 0xeb, 0xc6, 0x99, 0x55, 0xd3, 0xd1, 0x0f, 0xb6, 0x2f, 0x77, - 0x4b, 0xc6, 0xab, 0xe0, 0xec, 0x88, 0x32, 0x04, 0xf4, 0xc3, 0xde, 0x18, 0x50, 0xc5, 0xb2, 0x97, - 0xc0, 0x8b, 0x48, 0x2d, 0xb3, 0x49, 0xda, 0x35, 0x6f, 0x57, 0x2b, 0x5d, 0x2d, 0xdc, 0xe7, 0xef, - 0x3b, 0x8f, 0xb3, 0x85, 0x4d, 0xe6, 0x0b, 0x9b, 0x7c, 0x2e, 0x6c, 0x32, 0x5d, 0xda, 0xc6, 0x7c, - 0x69, 0x1b, 0x6f, 0x4b, 0xdb, 0x78, 0xba, 0x96, 0x0a, 0x9f, 0x47, 0x7d, 0x77, 0x00, 0x11, 0x0f, - 0xfd, 0xd7, 0x49, 0x28, 0x02, 0x29, 0xd2, 0xca, 0x78, 0x3c, 0x80, 0xb4, 0xf8, 0x8a, 0xfc, 0x97, - 0xdf, 0xa3, 0xff, 0x5f, 0x4b, 0xa7, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0x03, 0xf5, 0xd8, - 0x3c, 0x02, 0x00, 0x00, -} - -func (m *List) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *List) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Evidence) > 0 { - for iNdEx := len(m.Evidence) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Evidence[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TotalVotingPower != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) - i-- - dAtA[i] = 0x20 - } - if len(m.Validators) > 0 { - for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintTypes(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x12 - { - size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *List) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Evidence) > 0 { - for _, e := range m.Evidence { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - -func (m 
*Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Evidence.Size() - n += 1 + l + sovTypes(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) - n += 1 + l + sovTypes(uint64(l)) - if len(m.Validators) > 0 { - for _, e := range m.Validators { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.TotalVotingPower != 0 { - n += 1 + sovTypes(uint64(m.TotalVotingPower)) - } - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *List) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: List: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Evidence = append(m.Evidence, &types.Evidence{}) - if err := m.Evidence[len(m.Evidence)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } 
- iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Validators = append(m.Validators, &types.Validator{}) - if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) - } - m.TotalVotingPower = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalVotingPower |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - 
l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/proto/tendermint/evidence/types.proto b/proto/tendermint/evidence/types.proto deleted file mode 100644 index 85a6d0ecab..0000000000 --- a/proto/tendermint/evidence/types.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; -package tendermint.evidence; - -option go_package = "github.com/lazyledger/lazyledger-core/proto/tendermint/evidence"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import 
"tendermint/types/validator.proto"; -import "tendermint/types/types.proto"; - -message List { - repeated tendermint.types.Evidence evidence = 1; -} - -message Info { - tendermint.types.Evidence evidence = 1 [(gogoproto.nullable) = false]; - google.protobuf.Timestamp time = 2 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - repeated tendermint.types.Validator validators = 3; - int64 total_voting_power = 4; -} diff --git a/proto/tendermint/privval/types.pb.go b/proto/tendermint/privval/types.pb.go index f80741c304..61077b77de 100644 --- a/proto/tendermint/privval/types.pb.go +++ b/proto/tendermint/privval/types.pb.go @@ -5,6 +5,7 @@ package privval import ( fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" crypto "github.com/lazyledger/lazyledger-core/proto/tendermint/crypto" types "github.com/lazyledger/lazyledger-core/proto/tendermint/types" @@ -160,7 +161,7 @@ func (m *PubKeyRequest) GetChainId() string { // PubKeyResponse is a response message containing the public key. 
type PubKeyResponse struct { - PubKey *crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } @@ -197,11 +198,11 @@ func (m *PubKeyResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PubKeyResponse proto.InternalMessageInfo -func (m *PubKeyResponse) GetPubKey() *crypto.PublicKey { +func (m *PubKeyResponse) GetPubKey() crypto.PublicKey { if m != nil { return m.PubKey } - return nil + return crypto.PublicKey{} } func (m *PubKeyResponse) GetError() *RemoteSignerError { @@ -266,7 +267,7 @@ func (m *SignVoteRequest) GetChainId() string { // SignedVoteResponse is a response containing a signed vote or an error type SignedVoteResponse struct { - Vote *types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` + Vote types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote"` Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } @@ -303,11 +304,11 @@ func (m *SignedVoteResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SignedVoteResponse proto.InternalMessageInfo -func (m *SignedVoteResponse) GetVote() *types.Vote { +func (m *SignedVoteResponse) GetVote() types.Vote { if m != nil { return m.Vote } - return nil + return types.Vote{} } func (m *SignedVoteResponse) GetError() *RemoteSignerError { @@ -372,7 +373,7 @@ func (m *SignProposalRequest) GetChainId() string { // SignedProposalResponse is response containing a signed proposal or an error type SignedProposalResponse struct { - Proposal *types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` + Proposal types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } @@ -409,11 +410,11 @@ func 
(m *SignedProposalResponse) XXX_DiscardUnknown() { var xxx_messageInfo_SignedProposalResponse proto.InternalMessageInfo -func (m *SignedProposalResponse) GetProposal() *types.Proposal { +func (m *SignedProposalResponse) GetProposal() types.Proposal { if m != nil { return m.Proposal } - return nil + return types.Proposal{} } func (m *SignedProposalResponse) GetError() *RemoteSignerError { @@ -660,6 +661,61 @@ func (*Message) XXX_OneofWrappers() []interface{} { } } +// AuthSigMessage is duplicated from p2p prior to the P2P refactor. +// It is used for the SecretConnection until we migrate privval to gRPC. +// https://github.com/tendermint/tendermint/issues/4698 +type AuthSigMessage struct { + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Sig []byte `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"` +} + +func (m *AuthSigMessage) Reset() { *m = AuthSigMessage{} } +func (m *AuthSigMessage) String() string { return proto.CompactTextString(m) } +func (*AuthSigMessage) ProtoMessage() {} +func (*AuthSigMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{10} +} +func (m *AuthSigMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthSigMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthSigMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthSigMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthSigMessage.Merge(m, src) +} +func (m *AuthSigMessage) XXX_Size() int { + return m.Size() +} +func (m *AuthSigMessage) XXX_DiscardUnknown() { + xxx_messageInfo_AuthSigMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthSigMessage proto.InternalMessageInfo + +func (m *AuthSigMessage) GetPubKey() crypto.PublicKey { + if m != nil { + return 
m.PubKey + } + return crypto.PublicKey{} +} + +func (m *AuthSigMessage) GetSig() []byte { + if m != nil { + return m.Sig + } + return nil +} + func init() { proto.RegisterEnum("tendermint.privval.Errors", Errors_name, Errors_value) proto.RegisterType((*RemoteSignerError)(nil), "tendermint.privval.RemoteSignerError") @@ -672,58 +728,63 @@ func init() { proto.RegisterType((*PingRequest)(nil), "tendermint.privval.PingRequest") proto.RegisterType((*PingResponse)(nil), "tendermint.privval.PingResponse") proto.RegisterType((*Message)(nil), "tendermint.privval.Message") + proto.RegisterType((*AuthSigMessage)(nil), "tendermint.privval.AuthSigMessage") } func init() { proto.RegisterFile("tendermint/privval/types.proto", fileDescriptor_cb4e437a5328cf9c) } var fileDescriptor_cb4e437a5328cf9c = []byte{ - // 736 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4d, 0x4f, 0xdb, 0x4a, - 0x14, 0xb5, 0x21, 0x1f, 0x70, 0x43, 0x42, 0x18, 0x78, 0xbc, 0x80, 0x78, 0x7e, 0x79, 0x79, 0x6a, - 0x8b, 0x22, 0x35, 0x91, 0xa8, 0xda, 0x4d, 0xa5, 0x4a, 0x05, 0xac, 0x26, 0x42, 0x38, 0xe9, 0x24, - 0x14, 0x84, 0x54, 0x59, 0xf9, 0x98, 0x06, 0x8b, 0xc4, 0x33, 0xf5, 0x38, 0x48, 0xa9, 0xd4, 0x5d, - 0xb7, 0x95, 0xfa, 0x33, 0xba, 0xee, 0xaf, 0xe8, 0x92, 0x65, 0x97, 0x15, 0xfc, 0x91, 0x2a, 0xe3, - 0x89, 0x63, 0xe7, 0x03, 0xa9, 0x65, 0xe7, 0xb9, 0xe7, 0xde, 0x73, 0xcf, 0xf1, 0x1c, 0xcb, 0xa0, - 0xb9, 0xc4, 0x6e, 0x13, 0xa7, 0x67, 0xd9, 0x6e, 0x91, 0x39, 0xd6, 0xd5, 0x55, 0xa3, 0x5b, 0x74, - 0x07, 0x8c, 0xf0, 0x02, 0x73, 0xa8, 0x4b, 0x11, 0x1a, 0xe3, 0x05, 0x89, 0x6f, 0xef, 0x04, 0x66, - 0x5a, 0xce, 0x80, 0xb9, 0xb4, 0x78, 0x49, 0x06, 0x72, 0x22, 0x84, 0x0a, 0xa6, 0x20, 0x5f, 0xae, - 0x0c, 0x6b, 0x98, 0xf4, 0xa8, 0x4b, 0x6a, 0x56, 0xc7, 0x26, 0x8e, 0xee, 0x38, 0xd4, 0x41, 0x08, - 0x22, 0x2d, 0xda, 0x26, 0x19, 0x35, 0xab, 0xee, 0x46, 0xb1, 0x78, 0x46, 0x59, 0x48, 0xb4, 0x09, - 0x6f, 0x39, 0x16, 0x73, 0x2d, 0x6a, 0x67, 0x16, 0xb2, 0xea, 0xee, 
0x32, 0x0e, 0x96, 0x72, 0x79, - 0x48, 0x56, 0xfb, 0xcd, 0x23, 0x32, 0xc0, 0xe4, 0x7d, 0x9f, 0x70, 0x17, 0x6d, 0xc1, 0x52, 0xeb, - 0xa2, 0x61, 0xd9, 0xa6, 0xd5, 0x16, 0x54, 0xcb, 0x38, 0x2e, 0xce, 0xe5, 0x76, 0xee, 0x93, 0x0a, - 0xa9, 0x51, 0x33, 0x67, 0xd4, 0xe6, 0x04, 0x3d, 0x85, 0x38, 0xeb, 0x37, 0xcd, 0x4b, 0x32, 0x10, - 0xcd, 0x89, 0xbd, 0x9d, 0x42, 0xc0, 0xab, 0xe7, 0xab, 0x50, 0xed, 0x37, 0xbb, 0x56, 0x6b, 0x38, - 0x16, 0x63, 0x62, 0x1c, 0x3d, 0x87, 0x28, 0x19, 0x8a, 0x16, 0x8a, 0x12, 0x7b, 0x0f, 0x0a, 0xd3, - 0x2f, 0xa8, 0x30, 0xe5, 0x10, 0x7b, 0x33, 0xb9, 0x33, 0x58, 0x1d, 0x56, 0xdf, 0x50, 0x97, 0x8c, - 0x44, 0xe7, 0x21, 0x72, 0x45, 0x5d, 0x22, 0x35, 0x6c, 0x06, 0xe9, 0xbc, 0xf7, 0x26, 0x9a, 0x45, - 0x4f, 0xc8, 0xe0, 0x42, 0xd8, 0xe0, 0x47, 0x40, 0x62, 0x5f, 0xdb, 0xe3, 0x96, 0x1e, 0x7f, 0x87, - 0xfc, 0x5e, 0xc6, 0x2e, 0x60, 0x7d, 0x58, 0xad, 0x3a, 0x94, 0x51, 0xde, 0xe8, 0x8e, 0xcc, 0x3d, - 0x83, 0x25, 0x26, 0x4b, 0x52, 0xc3, 0xf6, 0xb4, 0x06, 0x7f, 0xc8, 0xef, 0xbd, 0xcb, 0xe8, 0x67, - 0x15, 0x36, 0x3d, 0xa7, 0xe3, 0x65, 0xd2, 0xed, 0x9f, 0x6e, 0xbb, 0x97, 0xf3, 0x24, 0x24, 0xaa, - 0x96, 0xdd, 0x91, 0x8e, 0x73, 0x29, 0x58, 0xf1, 0x8e, 0x9e, 0xa6, 0xdc, 0xb7, 0x28, 0xc4, 0x8f, - 0x09, 0xe7, 0x8d, 0x0e, 0x41, 0x47, 0xb0, 0x2a, 0x13, 0x67, 0x3a, 0x5e, 0xbb, 0x94, 0xf9, 0xdf, - 0xac, 0x8d, 0xa1, 0x6c, 0x97, 0x14, 0x9c, 0x64, 0xa1, 0xb0, 0x1b, 0x90, 0x1e, 0x93, 0x79, 0xcb, - 0xa4, 0xfe, 0xdc, 0x5d, 0x6c, 0x5e, 0x67, 0x49, 0xc1, 0x29, 0x16, 0xfe, 0x1c, 0x5e, 0xc3, 0x1a, - 0xb7, 0x3a, 0xb6, 0x39, 0xcc, 0x82, 0x2f, 0x6f, 0x51, 0x10, 0xfe, 0x3f, 0x8b, 0x70, 0x22, 0xc7, - 0x25, 0x05, 0xaf, 0xf2, 0x89, 0x68, 0x9f, 0xc3, 0x06, 0x17, 0x37, 0x35, 0x22, 0x95, 0x32, 0x23, - 0x82, 0xf5, 0xe1, 0x3c, 0xd6, 0x70, 0x86, 0x4b, 0x0a, 0x46, 0x7c, 0x3a, 0xd9, 0x6f, 0xe1, 0x2f, - 0x21, 0x77, 0x74, 0x89, 0xbe, 0xe4, 0xa8, 0x20, 0x7f, 0x34, 0x8f, 0x7c, 0x22, 0xa1, 0x25, 0x05, - 0xaf, 0xf3, 0x19, 0xc1, 0x7d, 0x07, 0x19, 0x29, 0x3d, 0xb0, 0x40, 0xca, 0x8f, 0x89, 0x0d, 0xf9, - 0xf9, 
0xf2, 0x27, 0x83, 0x59, 0x52, 0xf0, 0x26, 0x9f, 0x1d, 0xd9, 0x43, 0x58, 0x61, 0x96, 0xdd, - 0xf1, 0xd5, 0xc7, 0x05, 0xf7, 0xbf, 0x33, 0x6f, 0x70, 0x9c, 0xb2, 0x92, 0x82, 0x13, 0x6c, 0x7c, - 0x44, 0xaf, 0x20, 0x29, 0x59, 0xa4, 0xc4, 0x25, 0x41, 0x93, 0x9d, 0x4f, 0xe3, 0x0b, 0x5b, 0x61, - 0x81, 0xf3, 0x7e, 0x14, 0x16, 0x79, 0xbf, 0x97, 0xff, 0xaa, 0x42, 0x4c, 0x84, 0x9c, 0x23, 0x04, - 0x29, 0x1d, 0xe3, 0x0a, 0xae, 0x99, 0x27, 0xc6, 0x91, 0x51, 0x39, 0x35, 0xd2, 0x0a, 0xd2, 0x60, - 0xdb, 0xaf, 0xe9, 0x67, 0x55, 0xfd, 0xa0, 0xae, 0x1f, 0x9a, 0x58, 0xaf, 0x55, 0x2b, 0x46, 0x4d, - 0x4f, 0xab, 0x28, 0x03, 0x1b, 0x12, 0x37, 0x2a, 0xe6, 0x41, 0xc5, 0x30, 0xf4, 0x83, 0x7a, 0xb9, - 0x62, 0xa4, 0x17, 0xd0, 0x3f, 0xb0, 0x25, 0x91, 0x71, 0xd9, 0xac, 0x97, 0x8f, 0xf5, 0xca, 0x49, - 0x3d, 0xbd, 0x88, 0xfe, 0x86, 0x75, 0x09, 0x63, 0xfd, 0xe5, 0xa1, 0x0f, 0x44, 0x02, 0x8c, 0xa7, - 0xb8, 0x5c, 0xd7, 0x7d, 0x24, 0xba, 0x7f, 0xf6, 0xfd, 0x46, 0x53, 0xaf, 0x6f, 0x34, 0xf5, 0xe7, - 0x8d, 0xa6, 0x7e, 0xb9, 0xd5, 0x94, 0xeb, 0x5b, 0x4d, 0xf9, 0x71, 0xab, 0x29, 0xe7, 0x2f, 0x3a, - 0x96, 0x7b, 0xd1, 0x6f, 0x16, 0x5a, 0xb4, 0x57, 0xec, 0x36, 0x3e, 0x0c, 0xba, 0xa4, 0xdd, 0x21, - 0x4e, 0xe0, 0xf1, 0x71, 0x8b, 0x3a, 0xa4, 0x28, 0x7e, 0x4b, 0xc5, 0xe9, 0xbf, 0x60, 0x33, 0x26, - 0x90, 0x27, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x95, 0x1a, 0x1c, 0xe0, 0x22, 0x07, 0x00, 0x00, + // 789 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x6f, 0xf3, 0x44, + 0x14, 0xb5, 0x9b, 0x57, 0x7b, 0xf3, 0x68, 0x3a, 0x2d, 0x25, 0x8d, 0x8a, 0x1b, 0x8c, 0x80, 0x2a, + 0x12, 0x09, 0x2a, 0x12, 0x9b, 0x22, 0xa4, 0x3e, 0x2c, 0x12, 0x45, 0x75, 0xc2, 0x24, 0xa5, 0x55, + 0x25, 0x64, 0xe5, 0x31, 0x38, 0x56, 0x13, 0x8f, 0xf1, 0x38, 0x95, 0xc2, 0x96, 0x1d, 0x2b, 0x24, + 0xfe, 0x04, 0x6b, 0x7e, 0x45, 0x97, 0x5d, 0xb2, 0x42, 0xa8, 0xfd, 0x23, 0x9f, 0x32, 0x9e, 0x38, + 0xce, 0xab, 0xfa, 0x3e, 0x75, 0x77, 0xe7, 0xde, 0x3b, 0xe7, 0x9e, 0x33, 0x73, 0xec, 0x01, 0xc5, 
+ 0x23, 0x76, 0x8f, 0xb8, 0x43, 0xcb, 0xf6, 0xca, 0x8e, 0x6b, 0x3d, 0x3c, 0xb4, 0x07, 0x65, 0x6f, + 0xec, 0x10, 0x56, 0x72, 0x5c, 0xea, 0x51, 0x84, 0x66, 0xf5, 0x92, 0xa8, 0xe7, 0x0f, 0x43, 0x7b, + 0xba, 0xee, 0xd8, 0xf1, 0x68, 0xf9, 0x9e, 0x8c, 0xc5, 0x8e, 0xb9, 0x2a, 0x47, 0x0a, 0xe3, 0xe5, + 0xf7, 0x4c, 0x6a, 0x52, 0x1e, 0x96, 0x27, 0x91, 0x9f, 0x55, 0xab, 0xb0, 0x83, 0xc9, 0x90, 0x7a, + 0xa4, 0x69, 0x99, 0x36, 0x71, 0x35, 0xd7, 0xa5, 0x2e, 0x42, 0x10, 0xed, 0xd2, 0x1e, 0xc9, 0xc9, + 0x05, 0xf9, 0x38, 0x86, 0x79, 0x8c, 0x0a, 0x90, 0xec, 0x11, 0xd6, 0x75, 0x2d, 0xc7, 0xb3, 0xa8, + 0x9d, 0xdb, 0x28, 0xc8, 0xc7, 0x5b, 0x38, 0x9c, 0x52, 0x8b, 0x90, 0x6e, 0x8c, 0x3a, 0x35, 0x32, + 0xc6, 0xe4, 0xd7, 0x11, 0x61, 0x1e, 0x3a, 0x80, 0xcd, 0x6e, 0xbf, 0x6d, 0xd9, 0x86, 0xd5, 0xe3, + 0x50, 0x5b, 0x38, 0xc1, 0xd7, 0xd5, 0x9e, 0xfa, 0x87, 0x0c, 0x99, 0x69, 0x33, 0x73, 0xa8, 0xcd, + 0x08, 0x3a, 0x85, 0x84, 0x33, 0xea, 0x18, 0xf7, 0x64, 0xcc, 0x9b, 0x93, 0x27, 0x87, 0xa5, 0xd0, + 0x09, 0xf8, 0x6a, 0x4b, 0x8d, 0x51, 0x67, 0x60, 0x75, 0x6b, 0x64, 0x7c, 0x1e, 0x7d, 0xfc, 0xef, + 0x48, 0xc2, 0x71, 0x87, 0x83, 0xa0, 0x53, 0x88, 0x91, 0x09, 0x75, 0xce, 0x2b, 0x79, 0xf2, 0x79, + 0x69, 0xf9, 0xf0, 0x4a, 0x4b, 0x3a, 0xb1, 0xbf, 0x47, 0xbd, 0x85, 0xed, 0x49, 0xf6, 0x27, 0xea, + 0x91, 0x29, 0xf5, 0x22, 0x44, 0x1f, 0xa8, 0x47, 0x04, 0x93, 0xfd, 0x30, 0x9c, 0x7f, 0xa6, 0xbc, + 0x99, 0xf7, 0xcc, 0xc9, 0xdc, 0x98, 0x97, 0xf9, 0xbb, 0x0c, 0x88, 0x0f, 0xec, 0xf9, 0xe0, 0x42, + 0xea, 0xd7, 0xef, 0x83, 0x2e, 0x14, 0xfa, 0x33, 0xde, 0xa4, 0xaf, 0x0f, 0xbb, 0x93, 0x6c, 0xc3, + 0xa5, 0x0e, 0x65, 0xed, 0xc1, 0x54, 0xe3, 0xb7, 0xb0, 0xe9, 0x88, 0x94, 0x60, 0x92, 0x5f, 0x66, + 0x12, 0x6c, 0x0a, 0x7a, 0x5f, 0xd3, 0xfb, 0x97, 0x0c, 0xfb, 0xbe, 0xde, 0xd9, 0x30, 0xa1, 0xf9, + 0xbb, 0x0f, 0x99, 0x26, 0xb4, 0xcf, 0x66, 0xbe, 0x49, 0x7f, 0x1a, 0x92, 0x0d, 0xcb, 0x36, 0x85, + 0x6e, 0x35, 0x03, 0x29, 0x7f, 0xe9, 0x33, 0x53, 0xff, 0x89, 0x41, 0xe2, 0x8a, 0x30, 0xd6, 0x36, + 0x09, 0xaa, 0xc1, 0xb6, 0x30, 0xa1, 
0xe1, 0xfa, 0xed, 0x82, 0xec, 0xa7, 0xab, 0x26, 0xce, 0xd9, + 0xbd, 0x22, 0xe1, 0xb4, 0x33, 0xe7, 0x7f, 0x1d, 0xb2, 0x33, 0x30, 0x7f, 0x98, 0xe0, 0xaf, 0xbe, + 0x86, 0xe6, 0x77, 0x56, 0x24, 0x9c, 0x71, 0xe6, 0xbf, 0x90, 0x1f, 0x61, 0x87, 0x59, 0xa6, 0x6d, + 0x4c, 0x1c, 0x11, 0xd0, 0x8b, 0x70, 0xc0, 0xcf, 0x56, 0x01, 0x2e, 0x98, 0xba, 0x22, 0xe1, 0x6d, + 0xb6, 0xe0, 0xf3, 0x3b, 0xd8, 0x63, 0xfc, 0xbe, 0xa6, 0xa0, 0x82, 0x66, 0x94, 0xa3, 0x7e, 0xb1, + 0x0e, 0x75, 0xde, 0xcf, 0x15, 0x09, 0x23, 0xb6, 0xec, 0xf2, 0x9f, 0xe1, 0x23, 0x4e, 0x77, 0x7a, + 0x89, 0x01, 0xe5, 0x18, 0x07, 0xff, 0x72, 0x1d, 0xf8, 0x82, 0x4f, 0x2b, 0x12, 0xde, 0x65, 0x2b, + 0xec, 0xfb, 0x0b, 0xe4, 0x04, 0xf5, 0xd0, 0x00, 0x41, 0x3f, 0xce, 0x27, 0x14, 0xd7, 0xd3, 0x5f, + 0xb4, 0x67, 0x45, 0xc2, 0xfb, 0x6c, 0xb5, 0x71, 0x2f, 0x21, 0xe5, 0x58, 0xb6, 0x19, 0xb0, 0x4f, + 0x70, 0xec, 0xa3, 0x95, 0x37, 0x38, 0x73, 0x59, 0x45, 0xc2, 0x49, 0x67, 0xb6, 0x44, 0x3f, 0x40, + 0x5a, 0xa0, 0x08, 0x8a, 0x9b, 0x1c, 0xa6, 0xb0, 0x1e, 0x26, 0x20, 0x96, 0x72, 0x42, 0xeb, 0xf3, + 0x18, 0x44, 0xd8, 0x68, 0xa8, 0x1a, 0x90, 0x39, 0x1b, 0x79, 0xfd, 0xa6, 0x65, 0x4e, 0xad, 0xfb, + 0xa6, 0xff, 0x67, 0x16, 0x22, 0xcc, 0x32, 0xb9, 0x3b, 0x53, 0x78, 0x12, 0x16, 0xff, 0x96, 0x21, + 0xce, 0xbf, 0x22, 0x86, 0x10, 0x64, 0x34, 0x8c, 0xeb, 0xb8, 0x69, 0x5c, 0xeb, 0x35, 0xbd, 0x7e, + 0xa3, 0x67, 0x25, 0xa4, 0x40, 0x3e, 0xc8, 0x69, 0xb7, 0x0d, 0xed, 0xa2, 0xa5, 0x5d, 0x1a, 0x58, + 0x6b, 0x36, 0xea, 0x7a, 0x53, 0xcb, 0xca, 0x28, 0x07, 0x7b, 0xa2, 0xae, 0xd7, 0x8d, 0x8b, 0xba, + 0xae, 0x6b, 0x17, 0xad, 0x6a, 0x5d, 0xcf, 0x6e, 0xa0, 0x4f, 0xe0, 0x40, 0x54, 0x66, 0x69, 0xa3, + 0x55, 0xbd, 0xd2, 0xea, 0xd7, 0xad, 0x6c, 0x04, 0x7d, 0x0c, 0xbb, 0xa2, 0x8c, 0xb5, 0xb3, 0xcb, + 0xa0, 0x10, 0x0d, 0x21, 0xde, 0xe0, 0x6a, 0x4b, 0x0b, 0x2a, 0xb1, 0xf3, 0xdb, 0xc7, 0x67, 0x45, + 0x7e, 0x7a, 0x56, 0xe4, 0xff, 0x9f, 0x15, 0xf9, 0xcf, 0x17, 0x45, 0x7a, 0x7a, 0x51, 0xa4, 0x7f, + 0x5f, 0x14, 0xe9, 0xee, 0x7b, 0xd3, 0xf2, 0xfa, 0xa3, 0x4e, 0xa9, 0x4b, 
0x87, 0xe5, 0x41, 0xfb, + 0xb7, 0xf1, 0x80, 0xf4, 0x4c, 0xe2, 0x86, 0xc2, 0xaf, 0xba, 0xd4, 0x25, 0x65, 0xff, 0x55, 0x5c, + 0x7e, 0x8f, 0x3b, 0x71, 0x5e, 0xf9, 0xe6, 0x5d, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc1, 0xd6, 0x7a, + 0x90, 0xac, 0x07, 0x00, 0x00, } func (m *RemoteSignerError) Marshal() (dAtA []byte, err error) { @@ -823,18 +884,16 @@ func (m *PubKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.PubKey != nil { - { - size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -912,18 +971,16 @@ func (m *SignedVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Vote != nil { - { - size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -1001,18 +1058,16 @@ func (m *SignedProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i-- dAtA[i] = 0x12 } - if m.Proposal != nil { - { - size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -1262,6 +1317,46 @@ func (m *Message_PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { } return len(dAtA) - i, nil } +func (m *AuthSigMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthSigMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthSigMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sig) > 0 { + i -= len(m.Sig) + copy(dAtA[i:], m.Sig) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sig))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -1308,10 +1403,8 @@ func (m *PubKeyResponse) Size() (n int) { } var l int _ = l - if m.PubKey != nil { - l = m.PubKey.Size() - n += 1 + l + sovTypes(uint64(l)) - } + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) if m.Error != nil { l = m.Error.Size() n += 1 + l + sovTypes(uint64(l)) @@ -1342,10 +1435,8 @@ func (m *SignedVoteResponse) Size() (n int) { } var l int _ = l - if m.Vote != nil { - l = m.Vote.Size() - n += 1 + l + sovTypes(uint64(l)) - } + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) if m.Error != nil { l = m.Error.Size() n += 1 + l + sovTypes(uint64(l)) @@ -1376,10 +1467,8 @@ func (m *SignedProposalResponse) Size() (n int) { } var l int _ = l - if m.Proposal != nil { - l = m.Proposal.Size() - n += 1 + l + sovTypes(uint64(l)) - } + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) if m.Error != nil { l = m.Error.Size() n += 1 + l + sovTypes(uint64(l)) @@ -1513,6 +1602,20 @@ func (m *Message_PingResponse) Size() (n int) { } return n } +func (m *AuthSigMessage) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Sig) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 @@ -1767,9 +1870,6 @@ func (m *PubKeyResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PubKey == nil { - m.PubKey = &crypto.PublicKey{} - } if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2013,9 +2113,6 @@ func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Vote == nil { - m.Vote = &types.Vote{} - } if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2259,9 +2356,6 @@ func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Proposal == nil { - m.Proposal = &types.Proposal{} - } if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2765,6 +2859,126 @@ func (m *Message) Unmarshal(dAtA []byte) error { } return nil } +func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthSigMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthSigMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sig = append(m.Sig[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Sig == nil { + m.Sig = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/privval/types.proto b/proto/tendermint/privval/types.proto index 9cb479a813..5adc99bff3 100644 --- a/proto/tendermint/privval/types.proto +++ b/proto/tendermint/privval/types.proto @@ -3,6 +3,7 @@ package tendermint.privval; import "tendermint/crypto/keys.proto"; import "tendermint/types/types.proto"; +import "gogoproto/gogo.proto"; option go_package = "github.com/lazyledger/lazyledger-core/proto/tendermint/privval"; @@ -27,7 +28,7 @@ message PubKeyRequest { // PubKeyResponse is a response message containing the public key. message PubKeyResponse { - tendermint.crypto.PublicKey pub_key = 1; + tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; RemoteSignerError error = 2; } @@ -39,7 +40,7 @@ message SignVoteRequest { // SignedVoteResponse is a response containing a signed vote or an error message SignedVoteResponse { - tendermint.types.Vote vote = 1; + tendermint.types.Vote vote = 1 [(gogoproto.nullable) = false]; RemoteSignerError error = 2; } @@ -51,7 +52,7 @@ message SignProposalRequest { // SignedProposalResponse is response containing a signed proposal or an error message SignedProposalResponse { - tendermint.types.Proposal proposal = 1; + tendermint.types.Proposal proposal = 1 [(gogoproto.nullable) = false]; RemoteSignerError error = 2; } @@ -73,3 +74,11 @@ message Message { PingResponse ping_response = 8; } } + +// AuthSigMessage is duplicated from p2p prior to the P2P refactor. 
+// It is used for the SecretConnection until we migrate privval to gRPC. +// https://github.com/tendermint/tendermint/issues/4698 +message AuthSigMessage { + tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; + bytes sig = 2; +} diff --git a/proto/tendermint/statesync/message.go b/proto/tendermint/statesync/message.go new file mode 100644 index 0000000000..792e7f64cf --- /dev/null +++ b/proto/tendermint/statesync/message.go @@ -0,0 +1,94 @@ +package statesync + +import ( + "errors" + fmt "fmt" + + proto "github.com/gogo/protobuf/proto" +) + +// Wrap implements the p2p Wrapper interface and wraps a state sync messages. +func (m *Message) Wrap(msg proto.Message) error { + switch msg := msg.(type) { + case *ChunkRequest: + m.Sum = &Message_ChunkRequest{ChunkRequest: msg} + + case *ChunkResponse: + m.Sum = &Message_ChunkResponse{ChunkResponse: msg} + + case *SnapshotsRequest: + m.Sum = &Message_SnapshotsRequest{SnapshotsRequest: msg} + + case *SnapshotsResponse: + m.Sum = &Message_SnapshotsResponse{SnapshotsResponse: msg} + + default: + return fmt.Errorf("unknown message: %T", msg) + } + + return nil +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped state sync +// message. +func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_ChunkRequest: + return m.GetChunkRequest(), nil + + case *Message_ChunkResponse: + return m.GetChunkResponse(), nil + + case *Message_SnapshotsRequest: + return m.GetSnapshotsRequest(), nil + + case *Message_SnapshotsResponse: + return m.GetSnapshotsResponse(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} + +// Validate validates the message returning an error upon failure. 
+func (m *Message) Validate() error { + if m == nil { + return errors.New("message cannot be nil") + } + + switch msg := m.Sum.(type) { + case *Message_ChunkRequest: + if m.GetChunkRequest().Height == 0 { + return errors.New("height cannot be 0") + } + + case *Message_ChunkResponse: + if m.GetChunkResponse().Height == 0 { + return errors.New("height cannot be 0") + } + if m.GetChunkResponse().Missing && len(m.GetChunkResponse().Chunk) > 0 { + return errors.New("missing chunk cannot have contents") + } + if !m.GetChunkResponse().Missing && m.GetChunkResponse().Chunk == nil { + return errors.New("chunk cannot be nil") + } + + case *Message_SnapshotsRequest: + + case *Message_SnapshotsResponse: + if m.GetSnapshotsResponse().Height == 0 { + return errors.New("height cannot be 0") + } + if len(m.GetSnapshotsResponse().Hash) == 0 { + return errors.New("snapshot has no hash") + } + if m.GetSnapshotsResponse().Chunks == 0 { + return errors.New("snapshot has no chunks") + } + + default: + return fmt.Errorf("unknown message type: %T", msg) + } + + return nil +} diff --git a/statesync/messages_test.go b/proto/tendermint/statesync/message_test.go similarity index 58% rename from statesync/messages_test.go rename to proto/tendermint/statesync/message_test.go index aad3e09755..5a0bb8f777 100644 --- a/statesync/messages_test.go +++ b/proto/tendermint/statesync/message_test.go @@ -1,10 +1,10 @@ -package statesync +package statesync_test import ( "encoding/hex" "testing" - "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" ssproto "github.com/lazyledger/lazyledger-core/proto/tendermint/statesync" @@ -13,95 +13,164 @@ import ( func TestValidateMsg(t *testing.T) { testcases := map[string]struct { - msg proto.Message - valid bool + msg proto.Message + validMsg bool + valid bool }{ - "nil": {nil, false}, - "unrelated": {&tmproto.Block{}, false}, + "nil": {nil, false, false}, + "unrelated": 
{&tmproto.Block{}, false, false}, - "ChunkRequest valid": {&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, true}, - "ChunkRequest 0 height": {&ssproto.ChunkRequest{Height: 0, Format: 1, Index: 1}, false}, - "ChunkRequest 0 format": {&ssproto.ChunkRequest{Height: 1, Format: 0, Index: 1}, true}, - "ChunkRequest 0 chunk": {&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 0}, true}, + "ChunkRequest valid": {&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, true, true}, + "ChunkRequest 0 height": {&ssproto.ChunkRequest{Height: 0, Format: 1, Index: 1}, true, false}, + "ChunkRequest 0 format": {&ssproto.ChunkRequest{Height: 1, Format: 0, Index: 1}, true, true}, + "ChunkRequest 0 chunk": {&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 0}, true, true}, "ChunkResponse valid": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}}, - true}, + true, + true, + }, "ChunkResponse 0 height": { &ssproto.ChunkResponse{Height: 0, Format: 1, Index: 1, Chunk: []byte{1}}, - false}, + true, + false, + }, "ChunkResponse 0 format": { &ssproto.ChunkResponse{Height: 1, Format: 0, Index: 1, Chunk: []byte{1}}, - true}, + true, + true, + }, "ChunkResponse 0 chunk": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}, - true}, + true, + true, + }, "ChunkResponse empty body": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{}}, - true}, + true, + true, + }, "ChunkResponse nil body": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}, - false}, + true, + false, + }, "ChunkResponse missing": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, - true}, + true, + true, + }, "ChunkResponse missing with empty": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true, Chunk: []byte{}}, - true}, + true, + true, + }, "ChunkResponse missing with body": { &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true, Chunk: []byte{1}}, - false}, + true, + false, + 
}, - "SnapshotsRequest valid": {&ssproto.SnapshotsRequest{}, true}, + "SnapshotsRequest valid": {&ssproto.SnapshotsRequest{}, true, true}, "SnapshotsResponse valid": { &ssproto.SnapshotsResponse{Height: 1, Format: 1, Chunks: 2, Hash: []byte{1}}, - true}, + true, + true, + }, "SnapshotsResponse 0 height": { &ssproto.SnapshotsResponse{Height: 0, Format: 1, Chunks: 2, Hash: []byte{1}}, - false}, + true, + false, + }, "SnapshotsResponse 0 format": { &ssproto.SnapshotsResponse{Height: 1, Format: 0, Chunks: 2, Hash: []byte{1}}, - true}, + true, + true, + }, "SnapshotsResponse 0 chunks": { &ssproto.SnapshotsResponse{Height: 1, Format: 1, Hash: []byte{1}}, - false}, + true, + false, + }, "SnapshotsResponse no hash": { &ssproto.SnapshotsResponse{Height: 1, Format: 1, Chunks: 2, Hash: []byte{}}, - false}, + true, + false, + }, } + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - err := validateMsg(tc.msg) + msg := new(ssproto.Message) + + if tc.validMsg { + require.NoError(t, msg.Wrap(tc.msg)) + } else { + require.Error(t, msg.Wrap(tc.msg)) + } + if tc.valid { - require.NoError(t, err) + require.NoError(t, msg.Validate()) } else { - require.Error(t, err) + require.Error(t, msg.Validate()) } }) } } -//nolint:lll // ignore line length func TestStateSyncVectors(t *testing.T) { - testCases := []struct { testName string msg proto.Message expBytes string }{ - {"SnapshotsRequest", &ssproto.SnapshotsRequest{}, "0a00"}, - {"SnapshotsResponse", &ssproto.SnapshotsResponse{Height: 1, Format: 2, Chunks: 3, Hash: []byte("chuck hash"), Metadata: []byte("snapshot metadata")}, "1225080110021803220a636875636b20686173682a11736e617073686f74206d65746164617461"}, - {"ChunkRequest", &ssproto.ChunkRequest{Height: 1, Format: 2, Index: 3}, "1a06080110021803"}, - {"ChunkResponse", &ssproto.ChunkResponse{Height: 1, Format: 2, Index: 3, Chunk: []byte("it's a chunk")}, "2214080110021803220c697427732061206368756e6b"}, + { + "SnapshotsRequest", + &ssproto.SnapshotsRequest{}, + 
"0a00", + }, + { + "SnapshotsResponse", + &ssproto.SnapshotsResponse{ + Height: 1, + Format: 2, + Chunks: 3, + Hash: []byte("chuck hash"), + Metadata: []byte("snapshot metadata"), + }, + "1225080110021803220a636875636b20686173682a11736e617073686f74206d65746164617461", + }, + { + "ChunkRequest", + &ssproto.ChunkRequest{ + Height: 1, + Format: 2, + Index: 3, + }, + "1a06080110021803", + }, + { + "ChunkResponse", + &ssproto.ChunkResponse{ + Height: 1, + Format: 2, + Index: 3, + Chunk: []byte("it's a chunk"), + }, + "2214080110021803220c697427732061206368756e6b", + }, } for _, tc := range testCases { tc := tc - bz := mustEncodeMsg(tc.msg) + msg := new(ssproto.Message) + require.NoError(t, msg.Wrap(tc.msg)) + bz, err := msg.Marshal() + require.NoError(t, err) require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) } } diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto index cf536be807..0a33006783 100644 --- a/proto/tendermint/types/block.proto +++ b/proto/tendermint/types/block.proto @@ -7,8 +7,8 @@ import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; message Block { - Header header = 1 [(gogoproto.nullable) = false]; - Data data = 2 [(gogoproto.nullable) = false]; - DataAvailabilityHeader data_availability_header = 3; - Commit last_commit = 4; + Header header = 1 [(gogoproto.nullable) = false]; + Data data = 2 [(gogoproto.nullable) = false]; + DataAvailabilityHeader data_availability_header = 3; + Commit last_commit = 4; } diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 41146dc7df..44c6077ad7 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -422,7 +422,7 @@ type Data struct { // This means that block.AppHash does not include these txs. 
Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` IntermediateStateRoots IntermediateStateRoots `protobuf:"bytes,2,opt,name=intermediate_state_roots,json=intermediateStateRoots,proto3" json:"intermediate_state_roots"` - Evidence EvidenceData `protobuf:"bytes,3,opt,name=evidence,proto3" json:"evidence"` + Evidence EvidenceList `protobuf:"bytes,3,opt,name=evidence,proto3" json:"evidence"` Messages Messages `protobuf:"bytes,4,opt,name=messages,proto3" json:"messages"` } @@ -473,11 +473,11 @@ func (m *Data) GetIntermediateStateRoots() IntermediateStateRoots { return IntermediateStateRoots{} } -func (m *Data) GetEvidence() EvidenceData { +func (m *Data) GetEvidence() EvidenceList { if m != nil { return m.Evidence } - return EvidenceData{} + return EvidenceList{} } func (m *Data) GetMessages() Messages { @@ -487,18 +487,105 @@ func (m *Data) GetMessages() Messages { return Messages{} } -// DuplicateVoteEvidence contains evidence a validator signed two conflicting -// votes. 
+type Evidence struct { + // Types that are valid to be assigned to Sum: + // *Evidence_DuplicateVoteEvidence + // *Evidence_LightClientAttackEvidence + Sum isEvidence_Sum `protobuf_oneof:"sum"` +} + +func (m *Evidence) Reset() { *m = Evidence{} } +func (m *Evidence) String() string { return proto.CompactTextString(m) } +func (*Evidence) ProtoMessage() {} +func (*Evidence) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{5} +} +func (m *Evidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Evidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Evidence.Merge(m, src) +} +func (m *Evidence) XXX_Size() int { + return m.Size() +} +func (m *Evidence) XXX_DiscardUnknown() { + xxx_messageInfo_Evidence.DiscardUnknown(m) +} + +var xxx_messageInfo_Evidence proto.InternalMessageInfo + +type isEvidence_Sum interface { + isEvidence_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Evidence_DuplicateVoteEvidence struct { + DuplicateVoteEvidence *DuplicateVoteEvidence `protobuf:"bytes,1,opt,name=duplicate_vote_evidence,json=duplicateVoteEvidence,proto3,oneof" json:"duplicate_vote_evidence,omitempty"` +} +type Evidence_LightClientAttackEvidence struct { + LightClientAttackEvidence *LightClientAttackEvidence `protobuf:"bytes,2,opt,name=light_client_attack_evidence,json=lightClientAttackEvidence,proto3,oneof" json:"light_client_attack_evidence,omitempty"` +} + +func (*Evidence_DuplicateVoteEvidence) isEvidence_Sum() {} +func (*Evidence_LightClientAttackEvidence) isEvidence_Sum() {} + +func (m *Evidence) GetSum() isEvidence_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Evidence) 
GetDuplicateVoteEvidence() *DuplicateVoteEvidence { + if x, ok := m.GetSum().(*Evidence_DuplicateVoteEvidence); ok { + return x.DuplicateVoteEvidence + } + return nil +} + +func (m *Evidence) GetLightClientAttackEvidence() *LightClientAttackEvidence { + if x, ok := m.GetSum().(*Evidence_LightClientAttackEvidence); ok { + return x.LightClientAttackEvidence + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Evidence) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Evidence_DuplicateVoteEvidence)(nil), + (*Evidence_LightClientAttackEvidence)(nil), + } +} + +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. type DuplicateVoteEvidence struct { - VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` - VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` + VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` + VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` + TotalVotingPower int64 `protobuf:"varint,3,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + ValidatorPower int64 `protobuf:"varint,4,opt,name=validator_power,json=validatorPower,proto3" json:"validator_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` } func (m *DuplicateVoteEvidence) Reset() { *m = DuplicateVoteEvidence{} } func (m *DuplicateVoteEvidence) String() string { return proto.CompactTextString(m) } func (*DuplicateVoteEvidence) ProtoMessage() {} func (*DuplicateVoteEvidence) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{5} + return fileDescriptor_d3a6e55e2345de56, []int{6} } func (m *DuplicateVoteEvidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -541,16 +628,41 @@ func (m *DuplicateVoteEvidence) 
GetVoteB() *Vote { return nil } +func (m *DuplicateVoteEvidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetValidatorPower() int64 { + if m != nil { + return m.ValidatorPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. type LightClientAttackEvidence struct { - ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` - CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` + ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` + CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` + ByzantineValidators []*Validator `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators,omitempty"` + TotalVotingPower int64 `protobuf:"varint,4,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` } func (m *LightClientAttackEvidence) Reset() { *m = LightClientAttackEvidence{} } func (m *LightClientAttackEvidence) String() string { return proto.CompactTextString(m) } func (*LightClientAttackEvidence) ProtoMessage() {} func (*LightClientAttackEvidence) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{6} + return fileDescriptor_d3a6e55e2345de56, []int{7} } func (m *LightClientAttackEvidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -593,108 +705,43 @@ func (m 
*LightClientAttackEvidence) GetCommonHeight() int64 { return 0 } -type Evidence struct { - // Types that are valid to be assigned to Sum: - // *Evidence_DuplicateVoteEvidence - // *Evidence_LightClientAttackEvidence - Sum isEvidence_Sum `protobuf_oneof:"sum"` -} - -func (m *Evidence) Reset() { *m = Evidence{} } -func (m *Evidence) String() string { return proto.CompactTextString(m) } -func (*Evidence) ProtoMessage() {} -func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_d3a6e55e2345de56, []int{7} -} -func (m *Evidence) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Evidence) XXX_Merge(src proto.Message) { - xxx_messageInfo_Evidence.Merge(m, src) -} -func (m *Evidence) XXX_Size() int { - return m.Size() -} -func (m *Evidence) XXX_DiscardUnknown() { - xxx_messageInfo_Evidence.DiscardUnknown(m) -} - -var xxx_messageInfo_Evidence proto.InternalMessageInfo - -type isEvidence_Sum interface { - isEvidence_Sum() - MarshalTo([]byte) (int, error) - Size() int -} - -type Evidence_DuplicateVoteEvidence struct { - DuplicateVoteEvidence *DuplicateVoteEvidence `protobuf:"bytes,1,opt,name=duplicate_vote_evidence,json=duplicateVoteEvidence,proto3,oneof" json:"duplicate_vote_evidence,omitempty"` -} -type Evidence_LightClientAttackEvidence struct { - LightClientAttackEvidence *LightClientAttackEvidence `protobuf:"bytes,2,opt,name=light_client_attack_evidence,json=lightClientAttackEvidence,proto3,oneof" json:"light_client_attack_evidence,omitempty"` -} - -func (*Evidence_DuplicateVoteEvidence) isEvidence_Sum() {} -func (*Evidence_LightClientAttackEvidence) isEvidence_Sum() {} - -func (m *Evidence) GetSum() isEvidence_Sum { +func (m 
*LightClientAttackEvidence) GetByzantineValidators() []*Validator { if m != nil { - return m.Sum + return m.ByzantineValidators } return nil } -func (m *Evidence) GetDuplicateVoteEvidence() *DuplicateVoteEvidence { - if x, ok := m.GetSum().(*Evidence_DuplicateVoteEvidence); ok { - return x.DuplicateVoteEvidence - } - return nil -} - -func (m *Evidence) GetLightClientAttackEvidence() *LightClientAttackEvidence { - if x, ok := m.GetSum().(*Evidence_LightClientAttackEvidence); ok { - return x.LightClientAttackEvidence +func (m *LightClientAttackEvidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower } - return nil + return 0 } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Evidence) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Evidence_DuplicateVoteEvidence)(nil), - (*Evidence_LightClientAttackEvidence)(nil), +func (m *LightClientAttackEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp } + return time.Time{} } -// EvidenceData contains any evidence of malicious wrong-doing by validators -type EvidenceData struct { +type EvidenceList struct { Evidence []Evidence `protobuf:"bytes,1,rep,name=evidence,proto3" json:"evidence"` } -func (m *EvidenceData) Reset() { *m = EvidenceData{} } -func (m *EvidenceData) String() string { return proto.CompactTextString(m) } -func (*EvidenceData) ProtoMessage() {} -func (*EvidenceData) Descriptor() ([]byte, []int) { +func (m *EvidenceList) Reset() { *m = EvidenceList{} } +func (m *EvidenceList) String() string { return proto.CompactTextString(m) } +func (*EvidenceList) ProtoMessage() {} +func (*EvidenceList) Descriptor() ([]byte, []int) { return fileDescriptor_d3a6e55e2345de56, []int{8} } -func (m *EvidenceData) XXX_Unmarshal(b []byte) error { +func (m *EvidenceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *EvidenceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EvidenceList) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_EvidenceData.Marshal(b, m, deterministic) + return xxx_messageInfo_EvidenceList.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -704,19 +751,19 @@ func (m *EvidenceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *EvidenceData) XXX_Merge(src proto.Message) { - xxx_messageInfo_EvidenceData.Merge(m, src) +func (m *EvidenceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceList.Merge(m, src) } -func (m *EvidenceData) XXX_Size() int { +func (m *EvidenceList) XXX_Size() int { return m.Size() } -func (m *EvidenceData) XXX_DiscardUnknown() { - xxx_messageInfo_EvidenceData.DiscardUnknown(m) +func (m *EvidenceList) XXX_DiscardUnknown() { + xxx_messageInfo_EvidenceList.DiscardUnknown(m) } -var xxx_messageInfo_EvidenceData proto.InternalMessageInfo +var xxx_messageInfo_EvidenceList proto.InternalMessageInfo -func (m *EvidenceData) GetEvidence() []Evidence { +func (m *EvidenceList) GetEvidence() []Evidence { if m != nil { return m.Evidence } @@ -1500,10 +1547,10 @@ func init() { proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") proto.RegisterType((*Header)(nil), "tendermint.types.Header") proto.RegisterType((*Data)(nil), "tendermint.types.Data") + proto.RegisterType((*Evidence)(nil), "tendermint.types.Evidence") proto.RegisterType((*DuplicateVoteEvidence)(nil), "tendermint.types.DuplicateVoteEvidence") proto.RegisterType((*LightClientAttackEvidence)(nil), "tendermint.types.LightClientAttackEvidence") - proto.RegisterType((*Evidence)(nil), "tendermint.types.Evidence") - proto.RegisterType((*EvidenceData)(nil), "tendermint.types.EvidenceData") + proto.RegisterType((*EvidenceList)(nil), "tendermint.types.EvidenceList") proto.RegisterType((*IntermediateStateRoots)(nil), "tendermint.types.IntermediateStateRoots") proto.RegisterType((*Messages)(nil), 
"tendermint.types.Messages") proto.RegisterType((*Message)(nil), "tendermint.types.Message") @@ -1521,114 +1568,119 @@ func init() { func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } var fileDescriptor_d3a6e55e2345de56 = []byte{ - // 1712 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x23, 0xc7, - 0x11, 0xe6, 0xf0, 0x21, 0x92, 0x45, 0x52, 0xa2, 0x06, 0x5a, 0x2d, 0xc5, 0xdd, 0xa5, 0x98, 0xc9, - 0xc3, 0xb2, 0x1d, 0x53, 0x9b, 0x75, 0x10, 0x24, 0x80, 0x63, 0x98, 0xa4, 0xe4, 0x15, 0x63, 0x3d, - 0x88, 0x21, 0xbd, 0x79, 0x5c, 0x06, 0x4d, 0x4e, 0x8b, 0x9c, 0xec, 0x70, 0x7a, 0x30, 0xdd, 0xd4, - 0x4a, 0x7b, 0xcc, 0x29, 0x50, 0x2e, 0x3e, 0xe5, 0xa6, 0x53, 0x72, 0xc8, 0x3d, 0x7f, 0x20, 0xc8, - 0xc9, 0x47, 0xdf, 0x92, 0x4b, 0x9c, 0x40, 0x1b, 0x04, 0xf9, 0x19, 0x41, 0x3f, 0x66, 0x38, 0x14, - 0x49, 0xc7, 0x58, 0x2c, 0x7c, 0x11, 0xa6, 0xab, 0xbe, 0x7a, 0x57, 0x75, 0x35, 0x05, 0x0f, 0x19, - 0xf6, 0x6c, 0x1c, 0x4c, 0x1c, 0x8f, 0xed, 0xb3, 0x2b, 0x1f, 0x53, 0xf9, 0xb7, 0xe1, 0x07, 0x84, - 0x11, 0xbd, 0x3c, 0xe3, 0x36, 0x04, 0xbd, 0xba, 0x35, 0x22, 0x23, 0x22, 0x98, 0xfb, 0xfc, 0x4b, - 0xe2, 0xaa, 0xbb, 0x23, 0x42, 0x46, 0x2e, 0xde, 0x17, 0xa7, 0xc1, 0xf4, 0x7c, 0x9f, 0x39, 0x13, - 0x4c, 0x19, 0x9a, 0xf8, 0x0a, 0xf0, 0x28, 0x66, 0x66, 0x18, 0x5c, 0xf9, 0x8c, 0x70, 0x2c, 0x39, - 0x57, 0xec, 0x5a, 0x8c, 0x7d, 0x81, 0x03, 0xea, 0x10, 0x2f, 0xee, 0x47, 0xb5, 0xbe, 0xe0, 0xe5, - 0x05, 0x72, 0x1d, 0x1b, 0x31, 0x12, 0x48, 0x84, 0xf1, 0x13, 0x28, 0x75, 0x51, 0xc0, 0x7a, 0x98, - 0x1d, 0x61, 0x64, 0xe3, 0x40, 0xdf, 0x82, 0x0c, 0x23, 0x0c, 0xb9, 0x15, 0xad, 0xae, 0xed, 0x95, - 0x4c, 0x79, 0xd0, 0x75, 0x48, 0x8f, 0x11, 0x1d, 0x57, 0x92, 0x75, 0x6d, 0xaf, 0x68, 0x8a, 0x6f, - 0x63, 0x0c, 0x69, 0x2e, 0xca, 0x25, 0x1c, 0xcf, 0xc6, 0x97, 0xa1, 0x84, 0x38, 0x70, 0xea, 0xe0, - 0x8a, 0x61, 0xaa, 0x44, 0xe4, 0x41, 0xff, 0x21, 0x64, 0x84, 0xff, 0x95, 0x54, 0x5d, 0xdb, 0x2b, - 
0x3c, 0xa9, 0x34, 0x62, 0x89, 0x92, 0xf1, 0x35, 0xba, 0x9c, 0xdf, 0x4a, 0x7f, 0xfe, 0xe5, 0x6e, - 0xc2, 0x94, 0x60, 0xc3, 0x85, 0x6c, 0xcb, 0x25, 0xc3, 0xe7, 0x9d, 0x83, 0xc8, 0x11, 0x6d, 0xe6, - 0x88, 0x7e, 0x02, 0x1b, 0x3e, 0x0a, 0x98, 0x45, 0x31, 0xb3, 0xc6, 0x22, 0x0a, 0x61, 0xb4, 0xf0, - 0x64, 0xb7, 0x71, 0xb7, 0x0e, 0x8d, 0xb9, 0x60, 0x95, 0x95, 0x92, 0x1f, 0x27, 0x1a, 0xff, 0x49, - 0xc3, 0x9a, 0x4a, 0xc6, 0x4f, 0x21, 0xab, 0xd2, 0x2a, 0x0c, 0x16, 0x9e, 0x3c, 0x8a, 0x6b, 0x54, - 0xac, 0x46, 0x9b, 0x78, 0x14, 0x7b, 0x74, 0x4a, 0x95, 0xbe, 0x50, 0x46, 0xff, 0x1e, 0xe4, 0x86, - 0x63, 0xe4, 0x78, 0x96, 0x63, 0x0b, 0x8f, 0xf2, 0xad, 0xc2, 0xed, 0x97, 0xbb, 0xd9, 0x36, 0xa7, - 0x75, 0x0e, 0xcc, 0xac, 0x60, 0x76, 0x6c, 0x7d, 0x1b, 0xd6, 0xc6, 0xd8, 0x19, 0x8d, 0x99, 0x48, - 0x4b, 0xca, 0x54, 0x27, 0xfd, 0xc7, 0x90, 0xe6, 0x0d, 0x51, 0x49, 0x0b, 0xdb, 0xd5, 0x86, 0xec, - 0x96, 0x46, 0xd8, 0x2d, 0x8d, 0x7e, 0xd8, 0x2d, 0xad, 0x1c, 0x37, 0xfc, 0xd9, 0x3f, 0x77, 0x35, - 0x53, 0x48, 0xe8, 0x6d, 0x28, 0xb9, 0x88, 0x32, 0x6b, 0xc0, 0xd3, 0xc6, 0xcd, 0x67, 0x84, 0x8a, - 0x9d, 0xc5, 0x84, 0xa8, 0xc4, 0x2a, 0xd7, 0x0b, 0x5c, 0x4a, 0x92, 0x6c, 0x7d, 0x0f, 0xca, 0x42, - 0xc9, 0x90, 0x4c, 0x26, 0x0e, 0xb3, 0x44, 0xde, 0xd7, 0x44, 0xde, 0xd7, 0x39, 0xbd, 0x2d, 0xc8, - 0x47, 0xbc, 0x02, 0x0f, 0x20, 0x6f, 0x23, 0x86, 0x24, 0x24, 0x2b, 0x20, 0x39, 0x4e, 0x10, 0xcc, - 0xb7, 0x60, 0x23, 0xea, 0x3a, 0x2a, 0x21, 0x39, 0xa9, 0x65, 0x46, 0x16, 0xc0, 0xc7, 0xb0, 0xe5, - 0xe1, 0x4b, 0x66, 0xdd, 0x45, 0xe7, 0x05, 0x5a, 0xe7, 0xbc, 0x67, 0xf3, 0x12, 0xdf, 0x85, 0xf5, - 0x61, 0x98, 0x7c, 0x89, 0x05, 0x81, 0x2d, 0x45, 0x54, 0x01, 0xdb, 0x81, 0x1c, 0xf2, 0x7d, 0x09, - 0x28, 0x08, 0x40, 0x16, 0xf9, 0xbe, 0x60, 0xbd, 0x03, 0x9b, 0x22, 0xc6, 0x00, 0xd3, 0xa9, 0xcb, - 0x94, 0x92, 0xa2, 0xc0, 0x6c, 0x70, 0x86, 0x29, 0xe9, 0x02, 0xfb, 0x6d, 0x28, 0xe1, 0x0b, 0xc7, - 0xc6, 0xde, 0x10, 0x4b, 0x5c, 0x49, 0xe0, 0x8a, 0x21, 0x51, 0x80, 0xde, 0x86, 0xb2, 0x1f, 0x10, - 0x9f, 0x50, 0x1c, 0x58, 0xc8, 0xb6, 
0x03, 0x4c, 0x69, 0x65, 0x5d, 0xea, 0x0b, 0xe9, 0x4d, 0x49, - 0x36, 0x7e, 0x93, 0x84, 0xf4, 0x01, 0x62, 0x48, 0x2f, 0x43, 0x8a, 0x5d, 0xd2, 0x8a, 0x56, 0x4f, - 0xed, 0x15, 0x4d, 0xfe, 0xa9, 0x8f, 0xa1, 0xe2, 0x78, 0x0c, 0x07, 0x13, 0x6c, 0x3b, 0x88, 0x61, - 0x8b, 0x32, 0xfe, 0x37, 0x20, 0x84, 0x51, 0xd5, 0xdb, 0x7b, 0x8b, 0xa5, 0xec, 0xc4, 0x24, 0x7a, - 0x5c, 0xc0, 0xe4, 0x78, 0x55, 0xd9, 0x6d, 0x67, 0x29, 0x57, 0xff, 0x08, 0x72, 0xa1, 0xff, 0x6a, - 0x28, 0x6b, 0x8b, 0x9a, 0x0f, 0x15, 0x82, 0x7b, 0xab, 0xf4, 0x45, 0x52, 0xfa, 0x07, 0x90, 0x9b, - 0x60, 0x4a, 0xd1, 0x08, 0xd3, 0xa8, 0x53, 0x17, 0x34, 0x9c, 0x28, 0x44, 0x28, 0x1d, 0x4a, 0x18, - 0x53, 0xb8, 0x77, 0x30, 0xf5, 0x5d, 0x67, 0x88, 0x18, 0x7e, 0x46, 0x18, 0x0e, 0x4d, 0xe9, 0xef, - 0xc1, 0xda, 0x05, 0x61, 0xd8, 0x42, 0x6a, 0xf4, 0xb6, 0x17, 0x95, 0x72, 0xbc, 0x99, 0xe1, 0xa8, - 0x66, 0x04, 0x1f, 0xa8, 0xfc, 0x7c, 0x25, 0xbc, 0x65, 0xfc, 0x4e, 0x83, 0x9d, 0x63, 0x3e, 0x64, - 0x6d, 0xd7, 0xc1, 0x1e, 0x6b, 0x32, 0x86, 0x86, 0xcf, 0x23, 0xdb, 0x1d, 0xd8, 0x1c, 0x12, 0xef, - 0xdc, 0x75, 0x86, 0xcc, 0xf1, 0x46, 0x72, 0x8a, 0x94, 0x1b, 0x0f, 0x17, 0xf5, 0x0a, 0x3d, 0x62, - 0x68, 0xcc, 0x72, 0x4c, 0x4c, 0x50, 0x78, 0xd3, 0xf0, 0xf9, 0x21, 0x9e, 0xa5, 0x46, 0x3c, 0x29, - 0x46, 0xbc, 0x28, 0x89, 0x47, 0x82, 0x66, 0xfc, 0x5b, 0x83, 0x5c, 0x64, 0x1c, 0xc1, 0x7d, 0x3b, - 0xcc, 0x88, 0x25, 0x62, 0x8a, 0x0a, 0x24, 0x5d, 0x78, 0x6b, 0xd1, 0x85, 0xa5, 0x29, 0x3c, 0x4a, - 0x98, 0xf7, 0xec, 0xa5, 0xb9, 0xf5, 0xe0, 0xa1, 0xcb, 0x0d, 0x5b, 0x43, 0x11, 0xbd, 0x85, 0x44, - 0xf8, 0x33, 0x3b, 0x32, 0x85, 0xef, 0xae, 0x08, 0x75, 0x59, 0xca, 0x8e, 0x12, 0xe6, 0x8e, 0xbb, - 0x8a, 0xd9, 0xca, 0x40, 0x8a, 0x4e, 0x27, 0xc6, 0x31, 0x14, 0xe3, 0x9d, 0xc4, 0x3b, 0x27, 0x16, - 0x5a, 0x6a, 0x79, 0xe7, 0x44, 0x4a, 0xee, 0xf4, 0x9d, 0xf1, 0x21, 0x6c, 0x2f, 0xef, 0x78, 0xfd, - 0x3b, 0xb0, 0x1e, 0xa0, 0x17, 0x72, 0x5c, 0x2c, 0xd7, 0xa1, 0x4c, 0x8d, 0x56, 0x31, 0x40, 0x2f, - 0x04, 0xe2, 0xd8, 0xa1, 0xcc, 0xf8, 0x19, 0xe4, 0xc2, 0xae, 0xd4, 0x3f, 
0x84, 0x52, 0xd8, 0x91, - 0x33, 0x81, 0xa5, 0xf7, 0xa5, 0x12, 0x31, 0x8b, 0x21, 0x5e, 0xe8, 0xfa, 0x08, 0xb2, 0x8a, 0xa1, - 0x7f, 0x0b, 0x8a, 0x1e, 0x9a, 0x60, 0xea, 0xa3, 0x21, 0xe6, 0x37, 0xaf, 0xdc, 0x54, 0x85, 0x88, - 0xd6, 0xb1, 0xf9, 0x12, 0xe3, 0xb7, 0x63, 0xb8, 0x4d, 0xf9, 0xb7, 0xf1, 0x0b, 0xd8, 0xe6, 0x39, - 0x69, 0x5e, 0x20, 0xc7, 0x45, 0x03, 0xc7, 0x75, 0xd8, 0x95, 0x5a, 0x42, 0x0f, 0x20, 0x1f, 0x10, - 0x15, 0x8d, 0x0a, 0x24, 0x17, 0x10, 0x19, 0x08, 0xb7, 0x36, 0x24, 0xee, 0x74, 0xe2, 0x45, 0x97, - 0x03, 0xe7, 0x17, 0x24, 0x4d, 0x40, 0x8c, 0xff, 0x26, 0x21, 0xcd, 0xab, 0xaf, 0xbf, 0x0f, 0x69, - 0x1e, 0x83, 0xf0, 0x68, 0x7d, 0xd9, 0x72, 0xec, 0x39, 0x23, 0x0f, 0xdb, 0x27, 0x74, 0xd4, 0xbf, - 0xf2, 0xb1, 0x29, 0xc0, 0xb1, 0xdd, 0x94, 0x9c, 0xdb, 0x4d, 0x5b, 0x90, 0x09, 0xc8, 0xd4, 0xb3, - 0xc5, 0xa5, 0x91, 0x31, 0xe5, 0x41, 0x3f, 0x84, 0x5c, 0xb4, 0x72, 0xd2, 0xff, 0x6f, 0xe5, 0x6c, - 0xf0, 0x82, 0xf2, 0x85, 0xa8, 0x08, 0x66, 0x76, 0xa0, 0x36, 0x4f, 0x0b, 0xf2, 0xd1, 0x4b, 0x48, - 0xad, 0xae, 0xaf, 0xb7, 0xfd, 0x66, 0x62, 0xfa, 0xbb, 0xb0, 0x19, 0x2d, 0x92, 0xe8, 0x26, 0x96, - 0xeb, 0xab, 0x1c, 0x31, 0xd4, 0x55, 0x3c, 0xb7, 0xa3, 0x2c, 0xf9, 0x9a, 0xc9, 0x8a, 0xb8, 0x66, - 0x3b, 0xaa, 0x23, 0x9e, 0x35, 0x0f, 0x21, 0x4f, 0x9d, 0x91, 0x87, 0xd8, 0x34, 0xc0, 0x6a, 0x8d, - 0xcd, 0x08, 0xc6, 0x5f, 0x34, 0x58, 0x93, 0x6b, 0x31, 0x96, 0x37, 0x6d, 0x79, 0xde, 0x92, 0xab, - 0xf2, 0x96, 0x7a, 0xfd, 0xbc, 0x35, 0x01, 0x22, 0x67, 0xf8, 0x65, 0xcc, 0x7b, 0xf8, 0xc1, 0xa2, - 0x22, 0xe9, 0x62, 0xcf, 0x19, 0xa9, 0x99, 0x8a, 0x09, 0x19, 0xff, 0xd0, 0x20, 0x1f, 0xf1, 0xf5, - 0x26, 0x94, 0x42, 0xbf, 0xac, 0x73, 0x17, 0x8d, 0x54, 0xef, 0x3c, 0x5a, 0xe9, 0xdc, 0xc7, 0x2e, - 0x1a, 0x99, 0x05, 0xe5, 0x0f, 0x3f, 0x2c, 0xaf, 0x43, 0x72, 0x45, 0x1d, 0xe6, 0x0a, 0x9f, 0x7a, - 0xbd, 0xc2, 0xcf, 0x95, 0x28, 0x7d, 0xb7, 0x44, 0x7f, 0x4e, 0x42, 0xae, 0x2b, 0x16, 0x31, 0x72, - 0xbf, 0x89, 0x89, 0x78, 0x00, 0x79, 0x9f, 0xb8, 0x96, 0xe4, 0xa4, 0x05, 0x27, 0xe7, 0x13, 0xd7, - 0x5c, 0x28, 
0x7b, 0xe6, 0x0d, 0x8d, 0xcb, 0xda, 0x1b, 0xc8, 0x5a, 0xf6, 0x6e, 0xd6, 0x02, 0x28, - 0xca, 0x54, 0xa8, 0x3b, 0xe9, 0x31, 0xcf, 0x81, 0x78, 0x69, 0x6b, 0x8b, 0x0f, 0x79, 0xe9, 0xb6, - 0x44, 0x9a, 0x0a, 0xc7, 0x25, 0xe4, 0x3b, 0x52, 0x2d, 0x97, 0xca, 0xaa, 0xb6, 0x34, 0x15, 0xce, - 0xf8, 0xbd, 0x06, 0x30, 0x5b, 0xad, 0xfc, 0x49, 0x4b, 0x85, 0x0b, 0xd6, 0x9c, 0xe5, 0xda, 0xaa, - 0xa2, 0x29, 0xfb, 0x45, 0x1a, 0xf7, 0xbb, 0x0d, 0xa5, 0x59, 0x33, 0x52, 0x1c, 0x3a, 0xb3, 0x44, - 0x49, 0xf4, 0xd2, 0xec, 0x61, 0x66, 0x16, 0x2f, 0x62, 0x27, 0xe3, 0xaf, 0x1a, 0xe4, 0x85, 0x4f, - 0x27, 0x98, 0xa1, 0xb9, 0x1a, 0x6a, 0xaf, 0x5f, 0xc3, 0x47, 0x00, 0x52, 0x0d, 0x75, 0x5e, 0x62, - 0xd5, 0x59, 0x79, 0x41, 0xe9, 0x39, 0x2f, 0xb1, 0xfe, 0xa3, 0x28, 0xe1, 0xa9, 0xaf, 0x4e, 0xb8, - 0x1a, 0xe9, 0x30, 0xed, 0xf7, 0x21, 0xeb, 0x4d, 0x27, 0x16, 0x7f, 0x5e, 0xa6, 0x65, 0xb7, 0x7a, - 0xd3, 0x49, 0xff, 0x92, 0x1a, 0xbf, 0x86, 0x6c, 0xff, 0x52, 0xfc, 0xd6, 0x92, 0x0b, 0x86, 0xa8, - 0x07, 0xbe, 0x5c, 0x57, 0x39, 0x4e, 0x10, 0xef, 0xd9, 0x25, 0xbb, 0x4a, 0x6f, 0x7c, 0xcd, 0x5f, - 0x71, 0xea, 0xf7, 0xdb, 0x3b, 0x7f, 0xd3, 0xa0, 0x10, 0xbb, 0x1f, 0xf4, 0x1f, 0xc0, 0xbd, 0xd6, - 0xf1, 0x59, 0xfb, 0x13, 0xab, 0x73, 0x60, 0x7d, 0x7c, 0xdc, 0x7c, 0x6a, 0x7d, 0x7a, 0xfa, 0xc9, - 0xe9, 0xd9, 0xcf, 0x4f, 0xcb, 0x89, 0xea, 0xf6, 0xf5, 0x4d, 0x5d, 0x8f, 0x61, 0x3f, 0xf5, 0x9e, - 0x7b, 0xe4, 0x85, 0xa7, 0xef, 0xc3, 0xd6, 0xbc, 0x48, 0xb3, 0xd5, 0x3b, 0x3c, 0xed, 0x97, 0xb5, - 0xea, 0xbd, 0xeb, 0x9b, 0xfa, 0x66, 0x4c, 0xa2, 0x39, 0xa0, 0xd8, 0x63, 0x8b, 0x02, 0xed, 0xb3, - 0x93, 0x93, 0x4e, 0xbf, 0x9c, 0x5c, 0x10, 0x50, 0x17, 0xf6, 0xdb, 0xb0, 0x39, 0x2f, 0x70, 0xda, - 0x39, 0x2e, 0xa7, 0xaa, 0xfa, 0xf5, 0x4d, 0x7d, 0x3d, 0x86, 0x3e, 0x75, 0xdc, 0x6a, 0xee, 0xb7, - 0x7f, 0xa8, 0x25, 0xfe, 0xf4, 0xc7, 0x9a, 0xc6, 0x23, 0x2b, 0xcd, 0xdd, 0x11, 0xfa, 0xf7, 0xe1, - 0x7e, 0xaf, 0xf3, 0xf4, 0xf4, 0xf0, 0xc0, 0x3a, 0xe9, 0x3d, 0xb5, 0xfa, 0xbf, 0xec, 0x1e, 0xc6, - 0xa2, 0xdb, 0xb8, 0xbe, 0xa9, 0x17, 0x54, 0x48, 
0xab, 0xd0, 0x5d, 0xf3, 0xf0, 0xd9, 0x59, 0xff, - 0xb0, 0xac, 0x49, 0x74, 0x37, 0xc0, 0xfc, 0x0d, 0x28, 0xd0, 0x8f, 0x61, 0x67, 0x09, 0x3a, 0x0a, - 0x6c, 0xf3, 0xfa, 0xa6, 0x5e, 0xea, 0x06, 0x58, 0xce, 0x8f, 0x90, 0x68, 0x40, 0x65, 0x51, 0xe2, - 0xac, 0x7b, 0xd6, 0x6b, 0x1e, 0x97, 0xeb, 0xd5, 0xf2, 0xf5, 0x4d, 0xbd, 0x18, 0x5e, 0x86, 0x1c, - 0x3f, 0x8b, 0xac, 0xf5, 0xec, 0xf3, 0xdb, 0x9a, 0xf6, 0xc5, 0x6d, 0x4d, 0xfb, 0xd7, 0x6d, 0x4d, - 0xfb, 0xec, 0x55, 0x2d, 0xf1, 0xc5, 0xab, 0x5a, 0xe2, 0xef, 0xaf, 0x6a, 0x89, 0x5f, 0x7d, 0x30, - 0x72, 0xd8, 0x78, 0x3a, 0x68, 0x0c, 0xc9, 0x64, 0xdf, 0x45, 0x2f, 0xaf, 0x5c, 0x6c, 0x8f, 0x70, - 0x10, 0xfb, 0x7c, 0x6f, 0x48, 0x02, 0xf5, 0x3f, 0x8d, 0xfd, 0xbb, 0xff, 0x80, 0x18, 0xac, 0x09, - 0xfa, 0xfb, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x01, 0xce, 0x2f, 0x41, 0x11, 0x00, 0x00, + // 1786 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, + 0x11, 0xd6, 0x90, 0x94, 0x48, 0x16, 0x49, 0x89, 0xea, 0xc8, 0x32, 0x45, 0xdb, 0x14, 0x33, 0x79, + 0xac, 0xf6, 0x45, 0x39, 0xde, 0x20, 0x48, 0x80, 0xcd, 0x62, 0x49, 0x49, 0x6b, 0x33, 0xab, 0x07, + 0x31, 0xd4, 0x2a, 0x8f, 0xcb, 0xa0, 0xc9, 0x69, 0x93, 0x13, 0x0f, 0xa7, 0x07, 0xd3, 0x4d, 0x59, + 0xf2, 0x31, 0xa7, 0x40, 0x27, 0x9f, 0x72, 0xd3, 0x29, 0x39, 0xe4, 0x9e, 0x3f, 0x10, 0xe4, 0xb4, + 0x97, 0x00, 0x7b, 0x4b, 0x2e, 0xd9, 0x04, 0x76, 0x10, 0xe4, 0x67, 0x04, 0xfd, 0x98, 0xe1, 0x50, + 0x24, 0x1d, 0xc3, 0x30, 0xf6, 0x42, 0xcc, 0x54, 0x7d, 0x5f, 0x77, 0x3d, 0xba, 0xaa, 0x7a, 0x08, + 0x77, 0x39, 0xf1, 0x1d, 0x12, 0x8e, 0x5c, 0x9f, 0xef, 0xf2, 0xcb, 0x80, 0x30, 0xf5, 0xdb, 0x08, + 0x42, 0xca, 0x29, 0x2a, 0x4f, 0xb4, 0x0d, 0x29, 0xaf, 0x6e, 0x0c, 0xe8, 0x80, 0x4a, 0xe5, 0xae, + 0x78, 0x52, 0xb8, 0xea, 0xf6, 0x80, 0xd2, 0x81, 0x47, 0x76, 0xe5, 0x5b, 0x6f, 0xfc, 0x78, 0x97, + 0xbb, 0x23, 0xc2, 0x38, 0x1e, 0x05, 0x1a, 0x70, 0x2f, 0xb1, 0x4d, 0x3f, 0xbc, 0x0c, 0x38, 0x15, + 0x58, 0xfa, 0x58, 0xab, 0x6b, 0x09, 
0xf5, 0x39, 0x09, 0x99, 0x4b, 0xfd, 0xa4, 0x1d, 0xd5, 0xfa, + 0x8c, 0x95, 0xe7, 0xd8, 0x73, 0x1d, 0xcc, 0x69, 0xa8, 0x10, 0xe6, 0x4f, 0xa0, 0xd4, 0xc1, 0x21, + 0xef, 0x12, 0xfe, 0x88, 0x60, 0x87, 0x84, 0x68, 0x03, 0x96, 0x39, 0xe5, 0xd8, 0xab, 0x18, 0x75, + 0x63, 0xa7, 0x64, 0xa9, 0x17, 0x84, 0x20, 0x33, 0xc4, 0x6c, 0x58, 0x49, 0xd5, 0x8d, 0x9d, 0xa2, + 0x25, 0x9f, 0xcd, 0x21, 0x64, 0x04, 0x55, 0x30, 0x5c, 0xdf, 0x21, 0x17, 0x11, 0x43, 0xbe, 0x08, + 0x69, 0xef, 0x92, 0x13, 0xa6, 0x29, 0xea, 0x05, 0xfd, 0x10, 0x96, 0xa5, 0xfd, 0x95, 0x74, 0xdd, + 0xd8, 0x29, 0x3c, 0xa8, 0x34, 0x12, 0x81, 0x52, 0xfe, 0x35, 0x3a, 0x42, 0xdf, 0xca, 0x7c, 0xf9, + 0xf5, 0xf6, 0x92, 0xa5, 0xc0, 0xa6, 0x07, 0xd9, 0x96, 0x47, 0xfb, 0x4f, 0xda, 0xfb, 0xb1, 0x21, + 0xc6, 0xc4, 0x10, 0x74, 0x04, 0x6b, 0x01, 0x0e, 0xb9, 0xcd, 0x08, 0xb7, 0x87, 0xd2, 0x0b, 0xb9, + 0x69, 0xe1, 0xc1, 0x76, 0xe3, 0x66, 0x1e, 0x1a, 0x53, 0xce, 0xea, 0x5d, 0x4a, 0x41, 0x52, 0x68, + 0xfe, 0x27, 0x03, 0x2b, 0x3a, 0x18, 0x3f, 0x85, 0xac, 0x0e, 0xab, 0xdc, 0xb0, 0xf0, 0xe0, 0x5e, + 0x72, 0x45, 0xad, 0x6a, 0xec, 0x51, 0x9f, 0x11, 0x9f, 0x8d, 0x99, 0x5e, 0x2f, 0xe2, 0xa0, 0xef, + 0x43, 0xae, 0x3f, 0xc4, 0xae, 0x6f, 0xbb, 0x8e, 0xb4, 0x28, 0xdf, 0x2a, 0xbc, 0xf8, 0x7a, 0x3b, + 0xbb, 0x27, 0x64, 0xed, 0x7d, 0x2b, 0x2b, 0x95, 0x6d, 0x07, 0x6d, 0xc2, 0xca, 0x90, 0xb8, 0x83, + 0x21, 0x97, 0x61, 0x49, 0x5b, 0xfa, 0x0d, 0xfd, 0x18, 0x32, 0xe2, 0x40, 0x54, 0x32, 0x72, 0xef, + 0x6a, 0x43, 0x9d, 0x96, 0x46, 0x74, 0x5a, 0x1a, 0xa7, 0xd1, 0x69, 0x69, 0xe5, 0xc4, 0xc6, 0xcf, + 0xff, 0xb9, 0x6d, 0x58, 0x92, 0x81, 0xf6, 0xa0, 0xe4, 0x61, 0xc6, 0xed, 0x9e, 0x08, 0x9b, 0xd8, + 0x7e, 0x59, 0x2e, 0xb1, 0x35, 0x1b, 0x10, 0x1d, 0x58, 0x6d, 0x7a, 0x41, 0xb0, 0x94, 0xc8, 0x41, + 0x3b, 0x50, 0x96, 0x8b, 0xf4, 0xe9, 0x68, 0xe4, 0x72, 0x5b, 0xc6, 0x7d, 0x45, 0xc6, 0x7d, 0x55, + 0xc8, 0xf7, 0xa4, 0xf8, 0x91, 0xc8, 0xc0, 0x1d, 0xc8, 0x3b, 0x98, 0x63, 0x05, 0xc9, 0x4a, 0x48, + 0x4e, 0x08, 0xa4, 0xf2, 0x1d, 0x58, 0x8b, 0x4f, 0x1d, 0x53, 0x90, 0x9c, 
0x5a, 0x65, 0x22, 0x96, + 0xc0, 0xfb, 0xb0, 0xe1, 0x93, 0x0b, 0x6e, 0xdf, 0x44, 0xe7, 0x25, 0x1a, 0x09, 0xdd, 0xd9, 0x34, + 0xe3, 0x7b, 0xb0, 0xda, 0x8f, 0x82, 0xaf, 0xb0, 0x20, 0xb1, 0xa5, 0x58, 0x2a, 0x61, 0x5b, 0x90, + 0xc3, 0x41, 0xa0, 0x00, 0x05, 0x09, 0xc8, 0xe2, 0x20, 0x90, 0xaa, 0xf7, 0x60, 0x5d, 0xfa, 0x18, + 0x12, 0x36, 0xf6, 0xb8, 0x5e, 0xa4, 0x28, 0x31, 0x6b, 0x42, 0x61, 0x29, 0xb9, 0xc4, 0x7e, 0x07, + 0x4a, 0xe4, 0xdc, 0x75, 0x88, 0xdf, 0x27, 0x0a, 0x57, 0x92, 0xb8, 0x62, 0x24, 0x94, 0xa0, 0x77, + 0xa1, 0x1c, 0x84, 0x34, 0xa0, 0x8c, 0x84, 0x36, 0x76, 0x9c, 0x90, 0x30, 0x56, 0x59, 0x55, 0xeb, + 0x45, 0xf2, 0xa6, 0x12, 0x9b, 0xbf, 0x49, 0x41, 0x66, 0x1f, 0x73, 0x8c, 0xca, 0x90, 0xe6, 0x17, + 0xac, 0x62, 0xd4, 0xd3, 0x3b, 0x45, 0x4b, 0x3c, 0xa2, 0x21, 0x54, 0x5c, 0x9f, 0x93, 0x70, 0x44, + 0x1c, 0x17, 0x73, 0x62, 0x33, 0x2e, 0x7e, 0x43, 0x4a, 0x39, 0xd3, 0x67, 0x7b, 0x67, 0x36, 0x95, + 0xed, 0x04, 0xa3, 0x2b, 0x08, 0x96, 0xc0, 0xeb, 0xcc, 0x6e, 0xba, 0x73, 0xb5, 0xe8, 0x53, 0xc8, + 0x45, 0xf6, 0xeb, 0xa2, 0xac, 0xcd, 0xae, 0x7c, 0xa0, 0x11, 0x87, 0x2e, 0xe3, 0x7a, 0xbd, 0x98, + 0x85, 0x3e, 0x86, 0xdc, 0x88, 0x30, 0x86, 0x07, 0x84, 0xc5, 0x27, 0x75, 0x66, 0x85, 0x23, 0x8d, + 0x88, 0xd8, 0x11, 0xc3, 0xfc, 0xb7, 0x01, 0xb9, 0x68, 0x79, 0x84, 0xe1, 0xb6, 0x33, 0x0e, 0x3c, + 0xb7, 0x2f, 0xbc, 0x3d, 0xa7, 0x9c, 0xd8, 0xb1, 0x6d, 0xaa, 0xfe, 0xde, 0x99, 0x5d, 0x79, 0x3f, + 0x22, 0x9c, 0x51, 0x4e, 0xa2, 0x95, 0x1e, 0x2d, 0x59, 0xb7, 0x9c, 0x79, 0x0a, 0xe4, 0xc3, 0x5d, + 0x4f, 0x14, 0x97, 0xdd, 0xf7, 0x5c, 0xe2, 0x73, 0x1b, 0x73, 0x8e, 0xfb, 0x4f, 0x26, 0xfb, 0xa8, + 0xe8, 0xbe, 0x3f, 0xbb, 0xcf, 0xa1, 0x60, 0xed, 0x49, 0x52, 0x53, 0x72, 0x12, 0x7b, 0x6d, 0x79, + 0x8b, 0x94, 0xad, 0x65, 0x48, 0xb3, 0xf1, 0xc8, 0x7c, 0x9e, 0x82, 0x5b, 0x73, 0x2d, 0x45, 0x1f, + 0xc2, 0x8a, 0xf4, 0x14, 0x6b, 0x17, 0x37, 0x67, 0xb7, 0x16, 0x78, 0x6b, 0x59, 0xa0, 0x9a, 0x31, + 0xbc, 0xa7, 0x2d, 0x7d, 0x25, 0xbc, 0x85, 0x3e, 0x00, 0x24, 0x3b, 0xb8, 0x88, 0xa6, 0xeb, 0x0f, + 0xec, 0x80, 
0x3e, 0x25, 0xa1, 0x6e, 0x33, 0x65, 0xa9, 0x39, 0x93, 0x8a, 0x8e, 0x90, 0x4f, 0x95, + 0xaa, 0x86, 0x66, 0x24, 0x74, 0x52, 0xaa, 0x0a, 0xd8, 0x82, 0x7c, 0x3c, 0xaa, 0x74, 0x6f, 0x79, + 0xbd, 0xf6, 0x34, 0xa1, 0x99, 0x7f, 0x4d, 0xc1, 0xd6, 0xc2, 0xa0, 0xa2, 0x36, 0xac, 0xf7, 0xa9, + 0xff, 0xd8, 0x73, 0xfb, 0xd2, 0x6e, 0xd9, 0xc8, 0x74, 0x84, 0xee, 0x2e, 0x48, 0x8e, 0xec, 0x5b, + 0x56, 0x39, 0x41, 0x93, 0x12, 0x51, 0xb7, 0xa2, 0x85, 0x51, 0xdf, 0xd6, 0x5d, 0x36, 0x25, 0x7d, + 0x2a, 0x2a, 0xe1, 0x23, 0xd5, 0x6b, 0x8f, 0x61, 0xa3, 0x77, 0xf9, 0x0c, 0xfb, 0xdc, 0xf5, 0x49, + 0xa2, 0x03, 0x55, 0xd2, 0xf5, 0xf4, 0x4e, 0xe1, 0xc1, 0x9d, 0x39, 0x51, 0x8e, 0x30, 0xd6, 0xb7, + 0x62, 0xe2, 0xa4, 0x3d, 0x2d, 0x08, 0x7c, 0x66, 0x41, 0xe0, 0xdf, 0x46, 0x3c, 0x0f, 0xa1, 0x98, + 0xac, 0x53, 0x51, 0x97, 0x89, 0xea, 0x49, 0xcf, 0xaf, 0xcb, 0xf8, 0x9c, 0xde, 0xa8, 0x6a, 0xf3, + 0x13, 0xd8, 0x9c, 0xdf, 0x4f, 0xd0, 0x77, 0x61, 0x35, 0xc4, 0x4f, 0x55, 0x33, 0xb2, 0x3d, 0x97, + 0x71, 0xdd, 0xb8, 0x8a, 0x21, 0x7e, 0x2a, 0x11, 0x62, 0x77, 0xf3, 0x67, 0x90, 0x8b, 0x6a, 0x1e, + 0x7d, 0x02, 0xa5, 0xa8, 0xde, 0x27, 0x84, 0xb9, 0xd3, 0x48, 0x53, 0xac, 0x62, 0x84, 0x97, 0x6b, + 0x7d, 0x0a, 0x59, 0xad, 0x40, 0xdf, 0x86, 0xa2, 0x8f, 0x47, 0x84, 0x05, 0xb8, 0x4f, 0xc4, 0x5c, + 0x53, 0xf7, 0x80, 0x42, 0x2c, 0x6b, 0x3b, 0xe2, 0x8a, 0x20, 0x66, 0x4f, 0x74, 0x57, 0x11, 0xcf, + 0xe6, 0x2f, 0x60, 0x53, 0x74, 0xda, 0xe6, 0x39, 0x76, 0x3d, 0xdc, 0x73, 0x3d, 0x97, 0x5f, 0xea, + 0x11, 0x7f, 0x07, 0xf2, 0x21, 0xd5, 0xde, 0x68, 0x47, 0x72, 0x21, 0x55, 0x8e, 0x88, 0xdd, 0xfa, + 0xd4, 0x1b, 0x8f, 0xfc, 0xb8, 0xf5, 0x0a, 0x7d, 0x41, 0xc9, 0x24, 0xc4, 0xfc, 0x6f, 0x0a, 0x32, + 0xa2, 0xe0, 0xd0, 0x47, 0x90, 0x11, 0x3e, 0x48, 0x8b, 0x56, 0xe7, 0x5d, 0x3d, 0xba, 0xee, 0xc0, + 0x27, 0xce, 0x11, 0x1b, 0x9c, 0x5e, 0x06, 0xc4, 0x92, 0xe0, 0xc4, 0xe4, 0x4f, 0x4d, 0x4d, 0xfe, + 0x0d, 0x58, 0x0e, 0xe9, 0xd8, 0x77, 0x64, 0xa5, 0x2e, 0x5b, 0xea, 0x05, 0x1d, 0x40, 0x2e, 0x1e, + 0xe8, 0x99, 0xff, 0x37, 0xd0, 0xd7, 0x44, 0x42, 
0xc5, 0x75, 0x43, 0x0b, 0xac, 0x6c, 0x4f, 0xcf, + 0xf5, 0xb7, 0x70, 0xd8, 0xd0, 0xfb, 0xb0, 0x3e, 0xe9, 0x14, 0xd1, 0x9c, 0x53, 0x97, 0x83, 0x72, + 0xac, 0xd0, 0x83, 0x6e, 0xba, 0xad, 0xa8, 0xbb, 0x62, 0x56, 0xfa, 0x35, 0x69, 0x2b, 0x6d, 0x79, + 0x69, 0xbc, 0x0b, 0x79, 0xe6, 0x0e, 0x7c, 0xcc, 0xc7, 0x21, 0xd1, 0x97, 0x84, 0x89, 0xc0, 0xfc, + 0xb3, 0x01, 0x2b, 0xea, 0xd2, 0x91, 0x88, 0x9b, 0x31, 0x3f, 0x6e, 0xa9, 0x45, 0x71, 0x4b, 0xbf, + 0x79, 0xdc, 0x9a, 0x00, 0xb1, 0x31, 0x62, 0xd4, 0x2d, 0x68, 0x0c, 0xca, 0xc4, 0xae, 0x3b, 0xd0, + 0x35, 0x95, 0x20, 0x99, 0xff, 0x30, 0x20, 0x1f, 0xeb, 0x51, 0x13, 0x4a, 0x91, 0x5d, 0xf6, 0x63, + 0x0f, 0x0f, 0xf4, 0xd9, 0xb9, 0xb7, 0xd0, 0xb8, 0xcf, 0x3c, 0x3c, 0xb0, 0x0a, 0xda, 0x1e, 0xf1, + 0x32, 0x3f, 0x0f, 0xa9, 0x05, 0x79, 0x98, 0x4a, 0x7c, 0xfa, 0xcd, 0x12, 0x3f, 0x95, 0xa2, 0xcc, + 0xcd, 0x14, 0xfd, 0x29, 0x05, 0xb9, 0x8e, 0xbc, 0xe6, 0x60, 0xef, 0x9b, 0xa8, 0x88, 0x3b, 0x90, + 0x0f, 0xa8, 0x67, 0x2b, 0x4d, 0x46, 0x6a, 0x72, 0x01, 0xf5, 0xac, 0x99, 0xb4, 0x2f, 0xbf, 0xa5, + 0x72, 0x59, 0x79, 0x0b, 0x51, 0xcb, 0xde, 0x8c, 0x5a, 0x08, 0x45, 0x15, 0x0a, 0xdd, 0x93, 0xee, + 0x8b, 0x18, 0xc8, 0xef, 0x18, 0x63, 0xf6, 0x33, 0x49, 0x99, 0xad, 0x90, 0x96, 0xc6, 0x09, 0x86, + 0xba, 0xa5, 0xeb, 0x5b, 0x41, 0x65, 0xd1, 0xb1, 0xb4, 0x34, 0xce, 0xfc, 0x9d, 0x01, 0x30, 0x99, + 0x9a, 0xe2, 0x83, 0x81, 0x49, 0x13, 0xec, 0xa9, 0x9d, 0x6b, 0x8b, 0x92, 0xa6, 0xf7, 0x2f, 0xb2, + 0xa4, 0xdd, 0x7b, 0x50, 0x9a, 0x1c, 0x46, 0x46, 0x22, 0x63, 0x6a, 0xaf, 0x18, 0x9e, 0x5d, 0xc2, + 0xad, 0xe2, 0x79, 0xe2, 0xcd, 0xfc, 0x8b, 0x01, 0x79, 0x69, 0xd3, 0x11, 0xe1, 0x78, 0x2a, 0x87, + 0xc6, 0x9b, 0xe7, 0xf0, 0x1e, 0x80, 0x5a, 0x86, 0xb9, 0xcf, 0x88, 0x3e, 0x59, 0x79, 0x29, 0xe9, + 0xba, 0xcf, 0x08, 0xfa, 0x51, 0x1c, 0xf0, 0xf4, 0xab, 0x03, 0xae, 0x4b, 0x3a, 0x0a, 0xfb, 0x6d, + 0xc8, 0xfa, 0xe3, 0x91, 0x2d, 0x2e, 0xef, 0x6a, 0xb2, 0xaf, 0xf8, 0xe3, 0xd1, 0xe9, 0x05, 0x33, + 0x7f, 0x0d, 0xd9, 0xd3, 0x0b, 0xf9, 0x25, 0xab, 0x06, 0x0c, 0xd5, 0x9f, 0x4f, 0x6a, 
0x5c, 0xe5, + 0x84, 0x40, 0x7e, 0x2d, 0xcc, 0x99, 0x55, 0xa8, 0xf1, 0x9a, 0xdf, 0xc8, 0xfa, 0xeb, 0xf8, 0xbd, + 0xbf, 0x19, 0x50, 0x48, 0xf4, 0x07, 0xf4, 0x03, 0xb8, 0xd5, 0x3a, 0x3c, 0xd9, 0xfb, 0xdc, 0x6e, + 0xef, 0xdb, 0x9f, 0x1d, 0x36, 0x1f, 0xda, 0x5f, 0x1c, 0x7f, 0x7e, 0x7c, 0xf2, 0xf3, 0xe3, 0xf2, + 0x52, 0x75, 0xf3, 0xea, 0xba, 0x8e, 0x12, 0xd8, 0x2f, 0xfc, 0x27, 0x3e, 0x7d, 0xea, 0xa3, 0x5d, + 0xd8, 0x98, 0xa6, 0x34, 0x5b, 0xdd, 0x83, 0xe3, 0xd3, 0xb2, 0x51, 0xbd, 0x75, 0x75, 0x5d, 0x5f, + 0x4f, 0x30, 0x9a, 0x3d, 0x46, 0x7c, 0x3e, 0x4b, 0xd8, 0x3b, 0x39, 0x3a, 0x6a, 0x9f, 0x96, 0x53, + 0x33, 0x04, 0xdd, 0xb0, 0xdf, 0x85, 0xf5, 0x69, 0xc2, 0x71, 0xfb, 0xb0, 0x9c, 0xae, 0xa2, 0xab, + 0xeb, 0xfa, 0x6a, 0x02, 0x7d, 0xec, 0x7a, 0xd5, 0xdc, 0x6f, 0x7f, 0x5f, 0x5b, 0xfa, 0xe3, 0x1f, + 0x6a, 0x86, 0xf0, 0xac, 0x34, 0xd5, 0x23, 0xd0, 0x07, 0x70, 0xbb, 0xdb, 0x7e, 0x78, 0x7c, 0xb0, + 0x6f, 0x1f, 0x75, 0x1f, 0xda, 0xa7, 0xbf, 0xec, 0x1c, 0x24, 0xbc, 0x5b, 0xbb, 0xba, 0xae, 0x17, + 0xb4, 0x4b, 0x8b, 0xd0, 0x1d, 0xeb, 0xe0, 0xec, 0xe4, 0xf4, 0xa0, 0x6c, 0x28, 0x74, 0x27, 0x24, + 0xe2, 0x9e, 0x2c, 0xd1, 0xf7, 0x61, 0x6b, 0x0e, 0x3a, 0x76, 0x6c, 0xfd, 0xea, 0xba, 0x5e, 0xea, + 0x84, 0x44, 0xd5, 0x8f, 0x64, 0x34, 0xa0, 0x32, 0xcb, 0x38, 0xe9, 0x9c, 0x74, 0x9b, 0x87, 0xe5, + 0x7a, 0xb5, 0x7c, 0x75, 0x5d, 0x2f, 0x46, 0xcd, 0x50, 0xe0, 0x27, 0x9e, 0xb5, 0xce, 0xbe, 0x7c, + 0x51, 0x33, 0xbe, 0x7a, 0x51, 0x33, 0xfe, 0xf5, 0xa2, 0x66, 0x3c, 0x7f, 0x59, 0x5b, 0xfa, 0xea, + 0x65, 0x6d, 0xe9, 0xef, 0x2f, 0x6b, 0x4b, 0xbf, 0xfa, 0x78, 0xe0, 0xf2, 0xe1, 0xb8, 0xd7, 0xe8, + 0xd3, 0xd1, 0xae, 0x87, 0x9f, 0x5d, 0x7a, 0xc4, 0x19, 0x90, 0x30, 0xf1, 0xf8, 0x61, 0x9f, 0x86, + 0xfa, 0x1f, 0xa3, 0xdd, 0x9b, 0x7f, 0xef, 0xf4, 0x56, 0xa4, 0xfc, 0xa3, 0xff, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe9, 0x7b, 0x39, 0xf5, 0x9f, 0x12, 0x00, 0x00, } func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { @@ -1939,7 +1991,7 @@ func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 
len(dAtA) - i, nil } -func (m *DuplicateVoteEvidence) Marshal() (dAtA []byte, err error) { +func (m *Evidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1949,19 +2001,38 @@ func (m *DuplicateVoteEvidence) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { +func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.VoteB != nil { + if m.Sum != nil { { - size, err := m.VoteB.MarshalToSizedBuffer(dAtA[:i]) + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DuplicateVoteEvidence != nil { + { + size, err := m.DuplicateVoteEvidence.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1969,11 +2040,20 @@ func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } - if m.VoteA != nil { + return len(dAtA) - i, nil +} +func (m *Evidence_LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LightClientAttackEvidence != nil { { - size, err := m.VoteA.MarshalToSizedBuffer(dAtA[:i]) + size, 
err := m.LightClientAttackEvidence.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1981,12 +2061,11 @@ func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } return len(dAtA) - i, nil } - -func (m *LightClientAttackEvidence) Marshal() (dAtA []byte, err error) { +func (m *DuplicateVoteEvidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1996,24 +2075,49 @@ func (m *LightClientAttackEvidence) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { +func (m *DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.CommonHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.CommonHeight)) + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x2a + if m.ValidatorPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorPower)) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x20 } - if m.ConflictingBlock != nil { + if m.TotalVotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.VoteB != nil { { - size, err := m.ConflictingBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.VoteB.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.VoteA 
!= nil { + { + size, err := m.VoteA.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2026,7 +2130,7 @@ func (m *LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *Evidence) Marshal() (dAtA []byte, err error) { +func (m *LightClientAttackEvidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2036,59 +2140,51 @@ func (m *Evidence) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { +func (m *LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Sum != nil { - { - size := m.Sum.Size() - i -= size - if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } + n14, err14 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err14 != nil { + return 0, err14 } - return len(dAtA) - i, nil -} - -func (m *Evidence_DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Evidence_DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DuplicateVoteEvidence != nil { - { - size, err := m.DuplicateVoteEvidence.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i -= n14 + i = encodeVarintTypes(dAtA, i, uint64(n14)) + i-- + dAtA[i] = 0x2a + if m.TotalVotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x20 + } + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; 
iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } + } + if m.CommonHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CommonHeight)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x10 } - return len(dAtA) - i, nil -} -func (m *Evidence_LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Evidence_LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.LightClientAttackEvidence != nil { + if m.ConflictingBlock != nil { { - size, err := m.LightClientAttackEvidence.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ConflictingBlock.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2096,11 +2192,12 @@ func (m *Evidence_LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) ( i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *EvidenceData) Marshal() (dAtA []byte, err error) { + +func (m *EvidenceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2110,12 +2207,12 @@ func (m *EvidenceData) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *EvidenceData) MarshalTo(dAtA []byte) (int, error) { +func (m *EvidenceList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EvidenceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EvidenceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2323,12 +2420,12 @@ func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n14, err14 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err14 != nil { - return 0, err14 + n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err16 != nil { + return 0, err16 } - i -= n14 - i = encodeVarintTypes(dAtA, i, uint64(n14)) + i -= n16 + i = encodeVarintTypes(dAtA, i, uint64(n16)) i-- dAtA[i] = 0x2a { @@ -2443,12 +2540,12 @@ func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err17 != nil { - return 0, err17 + n19, err19 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err19 != nil { + return 0, err19 } - i -= n17 - i = encodeVarintTypes(dAtA, i, uint64(n17)) + i -= n19 + i = encodeVarintTypes(dAtA, i, uint64(n19)) i-- dAtA[i] = 0x1a if len(m.ValidatorAddress) > 0 { @@ -2493,12 +2590,12 @@ func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n18, err18 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err18 != nil { - return 0, err18 + n20, err20 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err20 != nil { + return 0, err20 } - i -= n18 - i = encodeVarintTypes(dAtA, i, uint64(n18)) + i -= n20 + i = encodeVarintTypes(dAtA, i, uint64(n20)) i-- dAtA[i] = 0x32 { @@ -2869,6 +2966,42 @@ func (m *Data) Size() (n int) { return n } +func (m *Evidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m 
*Evidence_DuplicateVoteEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DuplicateVoteEvidence != nil { + l = m.DuplicateVoteEvidence.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Evidence_LightClientAttackEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LightClientAttackEvidence != nil { + l = m.LightClientAttackEvidence.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func (m *DuplicateVoteEvidence) Size() (n int) { if m == nil { return 0 @@ -2883,6 +3016,14 @@ func (m *DuplicateVoteEvidence) Size() (n int) { l = m.VoteB.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.TotalVotingPower != 0 { + n += 1 + sovTypes(uint64(m.TotalVotingPower)) + } + if m.ValidatorPower != 0 { + n += 1 + sovTypes(uint64(m.ValidatorPower)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) return n } @@ -2899,46 +3040,21 @@ func (m *LightClientAttackEvidence) Size() (n int) { if m.CommonHeight != 0 { n += 1 + sovTypes(uint64(m.CommonHeight)) } - return n -} - -func (m *Evidence) Size() (n int) { - if m == nil { - return 0 + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } } - var l int - _ = l - if m.Sum != nil { - n += m.Sum.Size() + if m.TotalVotingPower != 0 { + n += 1 + sovTypes(uint64(m.TotalVotingPower)) } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) return n } -func (m *Evidence_DuplicateVoteEvidence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DuplicateVoteEvidence != nil { - l = m.DuplicateVoteEvidence.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *Evidence_LightClientAttackEvidence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.LightClientAttackEvidence != nil { - l = m.LightClientAttackEvidence.Size() 
- n += 1 + l + sovTypes(uint64(l)) - } - return n -} -func (m *EvidenceData) Size() (n int) { +func (m *EvidenceList) Size() (n int) { if m == nil { return 0 } @@ -4266,7 +4382,7 @@ func (m *Data) Unmarshal(dAtA []byte) error { } return nil } -func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { +func (m *Evidence) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4289,15 +4405,15 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DuplicateVoteEvidence: wiretype end group for non-group") + return fmt.Errorf("proto: Evidence: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DuplicateVoteEvidence: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteA", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DuplicateVoteEvidence", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4324,16 +4440,15 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.VoteA == nil { - m.VoteA = &Vote{} - } - if err := m.VoteA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &DuplicateVoteEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Sum = &Evidence_DuplicateVoteEvidence{v} iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VoteB", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LightClientAttackEvidence", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4360,12 +4475,11 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF 
} - if m.VoteB == nil { - m.VoteB = &Vote{} - } - if err := m.VoteB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &LightClientAttackEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Sum = &Evidence_LightClientAttackEvidence{v} iNdEx = postIndex default: iNdEx = preIndex @@ -4391,7 +4505,7 @@ func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { } return nil } -func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { +func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4414,15 +4528,15 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LightClientAttackEvidence: wiretype end group for non-group") + return fmt.Errorf("proto: DuplicateVoteEvidence: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LightClientAttackEvidence: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DuplicateVoteEvidence: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConflictingBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VoteA", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4449,18 +4563,54 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConflictingBlock == nil { - m.ConflictingBlock = &LightBlock{} + if m.VoteA == nil { + m.VoteA = &Vote{} } - if err := m.ConflictingBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.VoteA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteB", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteB == nil { + m.VoteB = &Vote{} + } + if err := m.VoteB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CommonHeight", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) } - m.CommonHeight = 0 + m.TotalVotingPower = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -4470,11 +4620,63 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CommonHeight |= int64(b&0x7F) << shift + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorPower", wireType) + } + m.ValidatorPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValidatorPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -4499,7 +4701,7 @@ func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { } return nil } -func (m *Evidence) Unmarshal(dAtA []byte) error { +func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4522,15 +4724,15 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Evidence: wiretype end group for non-group") + return fmt.Errorf("proto: LightClientAttackEvidence: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LightClientAttackEvidence: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DuplicateVoteEvidence", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConflictingBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4557,15 +4759,35 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &DuplicateVoteEvidence{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConflictingBlock == nil { + m.ConflictingBlock = &LightBlock{} + } + if err := m.ConflictingBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Sum = &Evidence_DuplicateVoteEvidence{v} iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonHeight", wireType) + } + m.CommonHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommonHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LightClientAttackEvidence", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4592,11 +4814,62 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &LightClientAttackEvidence{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ByzantineValidators = append(m.ByzantineValidators, &Validator{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { return err } - m.Sum = 
&Evidence_LightClientAttackEvidence{v} iNdEx = postIndex default: iNdEx = preIndex @@ -4622,7 +4895,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } return nil } -func (m *EvidenceData) Unmarshal(dAtA []byte) error { +func (m *EvidenceList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4645,10 +4918,10 @@ func (m *EvidenceData) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EvidenceData: wiretype end group for non-group") + return fmt.Errorf("proto: EvidenceList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EvidenceData: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EvidenceList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index 96fd42a177..971af04c40 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -86,25 +86,12 @@ message Data { // Txs that will be applied by state @ block.Height+1. // NOTE: not all txs here are valid. We're just agreeing on the order first. // This means that block.AppHash does not include these txs. - repeated bytes txs = 1; - + repeated bytes txs = 1; IntermediateStateRoots intermediate_state_roots = 2 [(gogoproto.nullable) = false]; - EvidenceData evidence = 3 [(gogoproto.nullable) = false]; + EvidenceList evidence = 3 [(gogoproto.nullable) = false]; Messages messages = 4 [(gogoproto.nullable) = false]; } -// DuplicateVoteEvidence contains evidence a validator signed two conflicting -// votes. 
-message DuplicateVoteEvidence { - Vote vote_a = 1; - Vote vote_b = 2; -} - -message LightClientAttackEvidence { - LightBlock conflicting_block = 1; - int64 common_height = 2; -} - message Evidence { oneof sum { DuplicateVoteEvidence duplicate_vote_evidence = 1; @@ -112,8 +99,25 @@ message Evidence { } } -// EvidenceData contains any evidence of malicious wrong-doing by validators -message EvidenceData { +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. +message DuplicateVoteEvidence { + tendermint.types.Vote vote_a = 1; + tendermint.types.Vote vote_b = 2; + int64 total_voting_power = 3; + int64 validator_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; +} + +// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. +message LightClientAttackEvidence { + tendermint.types.LightBlock conflicting_block = 1; + int64 common_height = 2; + repeated tendermint.types.Validator byzantine_validators = 3; + int64 total_voting_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; +} + +message EvidenceList { repeated Evidence evidence = 1 [(gogoproto.nullable) = false]; } diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 6d4adde68f..5017f5bd7a 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -23,61 +23,6 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// App includes the protocol and software version for the application. -// This information is included in ResponseInfo. The App.Protocol can be -// updated in ResponseEndBlock. 
-type App struct { - Protocol uint64 `protobuf:"varint,1,opt,name=protocol,proto3" json:"protocol,omitempty"` - Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"` -} - -func (m *App) Reset() { *m = App{} } -func (m *App) String() string { return proto.CompactTextString(m) } -func (*App) ProtoMessage() {} -func (*App) Descriptor() ([]byte, []int) { - return fileDescriptor_f9b42966edc5edad, []int{0} -} -func (m *App) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *App) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_App.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *App) XXX_Merge(src proto.Message) { - xxx_messageInfo_App.Merge(m, src) -} -func (m *App) XXX_Size() int { - return m.Size() -} -func (m *App) XXX_DiscardUnknown() { - xxx_messageInfo_App.DiscardUnknown(m) -} - -var xxx_messageInfo_App proto.InternalMessageInfo - -func (m *App) GetProtocol() uint64 { - if m != nil { - return m.Protocol - } - return 0 -} - -func (m *App) GetSoftware() string { - if m != nil { - return m.Software - } - return "" -} - // Consensus captures the consensus rules for processing a block in the blockchain, // including all blockchain data structures and the rules of the application's // state transition machine. 
@@ -90,7 +35,7 @@ func (m *Consensus) Reset() { *m = Consensus{} } func (m *Consensus) String() string { return proto.CompactTextString(m) } func (*Consensus) ProtoMessage() {} func (*Consensus) Descriptor() ([]byte, []int) { - return fileDescriptor_f9b42966edc5edad, []int{1} + return fileDescriptor_f9b42966edc5edad, []int{0} } func (m *Consensus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,29 +79,26 @@ func (m *Consensus) GetApp() uint64 { } func init() { - proto.RegisterType((*App)(nil), "tendermint.version.App") proto.RegisterType((*Consensus)(nil), "tendermint.version.Consensus") } func init() { proto.RegisterFile("tendermint/version/types.proto", fileDescriptor_f9b42966edc5edad) } var fileDescriptor_f9b42966edc5edad = []byte{ - // 231 bytes of a gzipped FileDescriptorProto + // 193 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x42, 0xc8, 0xeb, 0x41, - 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, 0x92, 0x2d, - 0x17, 0xb3, 0x63, 0x41, 0x81, 0x90, 0x14, 0x17, 0x07, 0x98, 0x9f, 0x9c, 0x9f, 0x23, 0xc1, 0xa8, - 0xc0, 0xa8, 0xc1, 0x12, 0x04, 0xe7, 0x83, 0xe4, 0x8a, 0xf3, 0xd3, 0x4a, 0xca, 0x13, 0x8b, 0x52, - 0x25, 0x98, 0x14, 0x18, 0x35, 0x38, 0x83, 0xe0, 0x7c, 0x25, 0x4b, 0x2e, 0x4e, 0xe7, 0xfc, 0xbc, - 0xe2, 0xd4, 0xbc, 0xe2, 0xd2, 0x62, 0x21, 0x11, 0x2e, 0xd6, 0xa4, 0x9c, 0xfc, 0xe4, 0x6c, 0xa8, - 0x09, 0x10, 0x8e, 0x90, 0x00, 0x17, 0x73, 0x62, 0x41, 0x01, 0x58, 0x27, 0x4b, 0x10, 0x88, 0x69, - 0xc5, 0xf2, 0x62, 0x81, 0x3c, 0xa3, 0x53, 0xc4, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, - 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, - 0x31, 0x44, 0xd9, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 
0xe7, 0xe7, 0xea, 0xe7, 0x24, - 0x56, 0x55, 0xe6, 0xa4, 0xa6, 0xa4, 0xa7, 0x16, 0x21, 0x31, 0x75, 0x93, 0xf3, 0x8b, 0x52, 0xf5, - 0x21, 0x7e, 0xc1, 0x0c, 0x89, 0x24, 0x36, 0xb0, 0x8c, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x75, - 0x86, 0xe8, 0x84, 0x26, 0x01, 0x00, 0x00, + 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, 0x92, 0x25, + 0x17, 0xa7, 0x73, 0x7e, 0x5e, 0x71, 0x6a, 0x5e, 0x71, 0x69, 0xb1, 0x90, 0x08, 0x17, 0x6b, 0x52, + 0x4e, 0x7e, 0x72, 0xb6, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x84, 0x23, 0x24, 0xc0, 0xc5, + 0x9c, 0x58, 0x50, 0x20, 0xc1, 0x04, 0x16, 0x03, 0x31, 0xad, 0x58, 0x5e, 0x2c, 0x90, 0x67, 0x74, + 0x8a, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, + 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xbb, 0xf4, 0xcc, 0x92, + 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0x9c, 0xc4, 0xaa, 0xca, 0x9c, 0xd4, 0x94, 0xf4, + 0xd4, 0x22, 0x24, 0xa6, 0x6e, 0x72, 0x7e, 0x51, 0xaa, 0x3e, 0xc4, 0x31, 0x98, 0x5e, 0x49, 0x62, + 0x03, 0xcb, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x91, 0xed, 0xe6, 0xe7, 0x00, 0x00, + 0x00, } func (this *Consensus) Equal(that interface{}) bool { @@ -186,41 +128,6 @@ func (this *Consensus) Equal(that interface{}) bool { } return true } -func (m *App) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *App) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *App) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Software) > 0 { - i -= len(m.Software) - copy(dAtA[i:], m.Software) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Software))) - i-- - dAtA[i] = 0x12 - } - if m.Protocol != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Protocol)) - i-- 
- dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *Consensus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -265,22 +172,6 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *App) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Protocol != 0 { - n += 1 + sovTypes(uint64(m.Protocol)) - } - l = len(m.Software) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - func (m *Consensus) Size() (n int) { if m == nil { return 0 @@ -302,110 +193,6 @@ func sovTypes(x uint64) (n int) { func sozTypes(x uint64) (n int) { return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *App) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: App: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: App: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - m.Protocol = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Protocol |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Software", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Software = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Consensus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/version/types.proto b/proto/tendermint/version/types.proto index 81851d4f91..ca18b3f47e 100644 --- a/proto/tendermint/version/types.proto +++ b/proto/tendermint/version/types.proto @@ -5,14 +5,6 @@ option go_package = "github.com/lazyledger/lazyledger-core/proto/tendermint/vers import "gogoproto/gogo.proto"; -// App includes the protocol and software version for the application. -// This information is included in ResponseInfo. The App.Protocol can be -// updated in ResponseEndBlock. -message App { - uint64 protocol = 1; - string software = 2; -} - // Consensus captures the consensus rules for processing a block in the blockchain, // including all blockchain data structures and the rules of the application's // state transition machine. 
diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 67c7590c0d..5e17c21294 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -1,6 +1,8 @@ package proxy import ( + "context" + abcicli "github.com/lazyledger/lazyledger-core/abci/client" "github.com/lazyledger/lazyledger-core/abci/types" ) @@ -14,42 +16,41 @@ type AppConnConsensus interface { SetResponseCallback(abcicli.Callback) Error() error - InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) - - BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(types.RequestDeliverTx) *abcicli.ReqRes - EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) - CommitSync() (*types.ResponseCommit, error) + InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) - PreprocessTxsSync(types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) + PreprocessTxsSync(context.Context, types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) + BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) + DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abcicli.ReqRes, error) + EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) + CommitSync(context.Context) (*types.ResponseCommit, error) } type AppConnMempool interface { SetResponseCallback(abcicli.Callback) Error() error - CheckTxAsync(types.RequestCheckTx) *abcicli.ReqRes - CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error) + CheckTxAsync(context.Context, types.RequestCheckTx) (*abcicli.ReqRes, error) + CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - FlushAsync() *abcicli.ReqRes - FlushSync() error + FlushAsync(context.Context) (*abcicli.ReqRes, error) + FlushSync(context.Context) error } type AppConnQuery interface { Error() error - EchoSync(string) (*types.ResponseEcho, error) - InfoSync(types.RequestInfo) 
(*types.ResponseInfo, error) - QuerySync(types.RequestQuery) (*types.ResponseQuery, error) + EchoSync(context.Context, string) (*types.ResponseEcho, error) + InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) + QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error) } type AppConnSnapshot interface { Error() error - ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) + ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) } //----------------------------------------------------------------------------------------- @@ -73,28 +74,40 @@ func (app *appConnConsensus) Error() error { return app.appConn.Error() } -func (app *appConnConsensus) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { - return app.appConn.InitChainSync(req) +func (app *appConnConsensus) InitChainSync( + ctx context.Context, + req types.RequestInitChain, +) (*types.ResponseInitChain, error) { + return app.appConn.InitChainSync(ctx, req) } -func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - return app.appConn.BeginBlockSync(req) +func (app *appConnConsensus) BeginBlockSync( + ctx context.Context, + req types.RequestBeginBlock, +) (*types.ResponseBeginBlock, error) { + return 
app.appConn.BeginBlockSync(ctx, req) } -func (app *appConnConsensus) DeliverTxAsync(req types.RequestDeliverTx) *abcicli.ReqRes { - return app.appConn.DeliverTxAsync(req) +func (app *appConnConsensus) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*abcicli.ReqRes, error) { + return app.appConn.DeliverTxAsync(ctx, req) } -func (app *appConnConsensus) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { - return app.appConn.EndBlockSync(req) +func (app *appConnConsensus) EndBlockSync( + ctx context.Context, + req types.RequestEndBlock, +) (*types.ResponseEndBlock, error) { + return app.appConn.EndBlockSync(ctx, req) } -func (app *appConnConsensus) CommitSync() (*types.ResponseCommit, error) { - return app.appConn.CommitSync() +func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { + return app.appConn.CommitSync(ctx) } -func (app *appConnConsensus) PreprocessTxsSync(req types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { - return app.appConn.PreprocessTxsSync(req) +func (app *appConnConsensus) PreprocessTxsSync( + ctx context.Context, + req types.RequestPreprocessTxs, +) (*types.ResponsePreprocessTxs, error) { + return app.appConn.PreprocessTxsSync(ctx, req) } //------------------------------------------------ @@ -118,20 +131,20 @@ func (app *appConnMempool) Error() error { return app.appConn.Error() } -func (app *appConnMempool) FlushAsync() *abcicli.ReqRes { - return app.appConn.FlushAsync() +func (app *appConnMempool) FlushAsync(ctx context.Context) (*abcicli.ReqRes, error) { + return app.appConn.FlushAsync(ctx) } -func (app *appConnMempool) FlushSync() error { - return app.appConn.FlushSync() +func (app *appConnMempool) FlushSync(ctx context.Context) error { + return app.appConn.FlushSync(ctx) } -func (app *appConnMempool) CheckTxAsync(req types.RequestCheckTx) *abcicli.ReqRes { - return app.appConn.CheckTxAsync(req) +func (app *appConnMempool) CheckTxAsync(ctx 
context.Context, req types.RequestCheckTx) (*abcicli.ReqRes, error) { + return app.appConn.CheckTxAsync(ctx, req) } -func (app *appConnMempool) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { - return app.appConn.CheckTxSync(req) +func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { + return app.appConn.CheckTxSync(ctx, req) } //------------------------------------------------ @@ -151,16 +164,16 @@ func (app *appConnQuery) Error() error { return app.appConn.Error() } -func (app *appConnQuery) EchoSync(msg string) (*types.ResponseEcho, error) { - return app.appConn.EchoSync(msg) +func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { + return app.appConn.EchoSync(ctx, msg) } -func (app *appConnQuery) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { - return app.appConn.InfoSync(req) +func (app *appConnQuery) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { + return app.appConn.InfoSync(ctx, req) } -func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (*types.ResponseQuery, error) { - return app.appConn.QuerySync(reqQuery) +func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { + return app.appConn.QuerySync(ctx, reqQuery) } //------------------------------------------------ @@ -180,20 +193,28 @@ func (app *appConnSnapshot) Error() error { return app.appConn.Error() } -func (app *appConnSnapshot) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - return app.appConn.ListSnapshotsSync(req) +func (app *appConnSnapshot) ListSnapshotsSync( + ctx context.Context, + req types.RequestListSnapshots, +) (*types.ResponseListSnapshots, error) { + return app.appConn.ListSnapshotsSync(ctx, req) } -func (app *appConnSnapshot) OfferSnapshotSync(req types.RequestOfferSnapshot) 
(*types.ResponseOfferSnapshot, error) { - return app.appConn.OfferSnapshotSync(req) +func (app *appConnSnapshot) OfferSnapshotSync( + ctx context.Context, + req types.RequestOfferSnapshot, +) (*types.ResponseOfferSnapshot, error) { + return app.appConn.OfferSnapshotSync(ctx, req) } func (app *appConnSnapshot) LoadSnapshotChunkSync( + ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - return app.appConn.LoadSnapshotChunkSync(req) + return app.appConn.LoadSnapshotChunkSync(ctx, req) } func (app *appConnSnapshot) ApplySnapshotChunkSync( + ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - return app.appConn.ApplySnapshotChunkSync(req) + return app.appConn.ApplySnapshotChunkSync(ctx, req) } diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index 3d206e68e6..52bbc55407 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -1,6 +1,7 @@ package proxy import ( + "context" "fmt" "strings" "testing" @@ -15,30 +16,30 @@ import ( //---------------------------------------- -type AppConnTest interface { - EchoAsync(string) *abcicli.ReqRes - FlushSync() error - InfoSync(types.RequestInfo) (*types.ResponseInfo, error) +type appConnTestI interface { + EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) + FlushSync(context.Context) error + InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) } type appConnTest struct { appConn abcicli.Client } -func NewAppConnTest(appConn abcicli.Client) AppConnTest { +func newAppConnTest(appConn abcicli.Client) appConnTestI { return &appConnTest{appConn} } -func (app *appConnTest) EchoAsync(msg string) *abcicli.ReqRes { - return app.appConn.EchoAsync(msg) +func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { + return app.appConn.EchoAsync(ctx, msg) } -func (app *appConnTest) FlushSync() error { - return app.appConn.FlushSync() +func (app 
*appConnTest) FlushSync(ctx context.Context) error { + return app.appConn.FlushSync(ctx) } -func (app *appConnTest) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { - return app.appConn.InfoSync(req) +func (app *appConnTest) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { + return app.appConn.InfoSync(ctx, req) } //---------------------------------------- @@ -71,13 +72,23 @@ func TestEcho(t *testing.T) { t.Fatalf("Error starting ABCI client: %v", err.Error()) } - proxy := NewAppConnTest(cli) + proxy := newAppConnTest(cli) t.Log("Connected") + ctx := context.Background() for i := 0; i < 1000; i++ { - proxy.EchoAsync(fmt.Sprintf("echo-%v", i)) + _, err = proxy.EchoAsync(ctx, fmt.Sprintf("echo-%v", i)) + if err != nil { + t.Error(err) + } + // flush sometimes + if i%128 == 0 { + if err := proxy.FlushSync(ctx); err != nil { + t.Error(err) + } + } } - if err := proxy.FlushSync(); err != nil { + if err := proxy.FlushSync(ctx); err != nil { t.Error(err) } } @@ -109,15 +120,25 @@ func BenchmarkEcho(b *testing.B) { b.Fatalf("Error starting ABCI client: %v", err.Error()) } - proxy := NewAppConnTest(cli) + proxy := newAppConnTest(cli) b.Log("Connected") echoString := strings.Repeat(" ", 200) b.StartTimer() // Start benchmarking tests + ctx := context.Background() for i := 0; i < b.N; i++ { - proxy.EchoAsync(echoString) + _, err = proxy.EchoAsync(ctx, echoString) + if err != nil { + b.Error(err) + } + // flush sometimes + if i%128 == 0 { + if err := proxy.FlushSync(ctx); err != nil { + b.Error(err) + } + } } - if err := proxy.FlushSync(); err != nil { + if err := proxy.FlushSync(ctx); err != nil { b.Error(err) } @@ -152,10 +173,10 @@ func TestInfo(t *testing.T) { t.Fatalf("Error starting ABCI client: %v", err.Error()) } - proxy := NewAppConnTest(cli) + proxy := newAppConnTest(cli) t.Log("Connected") - resInfo, err := proxy.InfoSync(RequestInfo) + resInfo, err := proxy.InfoSync(context.Background(), RequestInfo) if err != nil { 
t.Errorf("unexpected error: %v", err) } diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index bc293344b8..cfc589b1d7 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -1,9 +1,12 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. package mocks import ( + context "context" + abcicli "github.com/lazyledger/lazyledger-core/abci/client" + mock "github.com/stretchr/testify/mock" types "github.com/lazyledger/lazyledger-core/abci/types" @@ -14,13 +17,13 @@ type AppConnConsensus struct { mock.Mock } -// BeginBlockSync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - ret := _m.Called(_a0) +// BeginBlockSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseBeginBlock - if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseBeginBlock) @@ -28,8 +31,8 @@ func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types. } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -37,13 +40,13 @@ func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types. 
return r0, r1 } -// CommitSync provides a mock function with given fields: -func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { - ret := _m.Called() +// CommitSync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) { + ret := _m.Called(_a0) var r0 *types.ResponseCommit - if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseCommit) @@ -51,8 +54,8 @@ func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -60,29 +63,36 @@ func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { return r0, r1 } -// DeliverTxAsync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { - ret := _m.Called(_a0) +// DeliverTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// EndBlockSync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) 
EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { - ret := _m.Called(_a0) +// EndBlockSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseEndBlock - if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseEndBlock) @@ -90,8 +100,8 @@ func (_m *AppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.Resp } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -113,13 +123,13 @@ func (_m *AppConnConsensus) Error() error { return r0 } -// InitChainSync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { - ret := _m.Called(_a0) +// InitChainSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseInitChain - if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseInitChain) @@ -127,8 +137,8 @@ func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.Re } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestInitChain) 
error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -136,13 +146,13 @@ func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.Re return r0, r1 } -// PreprocessTxsSync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) PreprocessTxsSync(_a0 types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { - ret := _m.Called(_a0) +// PreprocessTxsSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) PreprocessTxsSync(_a0 context.Context, _a1 types.RequestPreprocessTxs) (*types.ResponsePreprocessTxs, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponsePreprocessTxs - if rf, ok := ret.Get(0).(func(types.RequestPreprocessTxs) *types.ResponsePreprocessTxs); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPreprocessTxs) *types.ResponsePreprocessTxs); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponsePreprocessTxs) @@ -150,8 +160,8 @@ func (_m *AppConnConsensus) PreprocessTxsSync(_a0 types.RequestPreprocessTxs) (* } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestPreprocessTxs) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPreprocessTxs) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index 1e208d18c1..f03fdd3dbd 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -1,9 +1,12 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. 
package mocks import ( + context "context" + abcicli "github.com/lazyledger/lazyledger-core/abci/client" + mock "github.com/stretchr/testify/mock" types "github.com/lazyledger/lazyledger-core/abci/types" @@ -14,29 +17,36 @@ type AppConnMempool struct { mock.Mock } -// CheckTxAsync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes { - ret := _m.Called(_a0) +// CheckTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// CheckTxSync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) { - ret := _m.Called(_a0) +// CheckTxSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnMempool) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseCheckTx - if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseCheckTx) @@ -44,8 +54,8 @@ func (_m *AppConnMempool) CheckTxSync(_a0 types.RequestCheckTx) (*types.Response } var r1 
error - if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -67,29 +77,36 @@ func (_m *AppConnMempool) Error() error { return r0 } -// FlushAsync provides a mock function with given fields: -func (_m *AppConnMempool) FlushAsync() *abcicli.ReqRes { - ret := _m.Called() +// FlushAsync provides a mock function with given fields: _a0 +func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { + ret := _m.Called(_a0) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*abcicli.ReqRes) } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// FlushSync provides a mock function with given fields: -func (_m *AppConnMempool) FlushSync() error { - ret := _m.Called() +// FlushSync provides a mock function with given fields: _a0 +func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index ac1044a1f4..a2ee2edcc9 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -1,8 +1,10 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. 
package mocks import ( + context "context" + mock "github.com/stretchr/testify/mock" types "github.com/lazyledger/lazyledger-core/abci/types" @@ -13,13 +15,13 @@ type AppConnQuery struct { mock.Mock } -// EchoSync provides a mock function with given fields: _a0 -func (_m *AppConnQuery) EchoSync(_a0 string) (*types.ResponseEcho, error) { - ret := _m.Called(_a0) +// EchoSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) EchoSync(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseEcho - if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseEcho) @@ -27,8 +29,8 @@ func (_m *AppConnQuery) EchoSync(_a0 string) (*types.ResponseEcho, error) { } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -50,13 +52,13 @@ func (_m *AppConnQuery) Error() error { return r0 } -// InfoSync provides a mock function with given fields: _a0 -func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { - ret := _m.Called(_a0) +// InfoSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseInfo - if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseInfo) @@ -64,8 +66,8 @@ func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) 
(*types.ResponseInfo, er } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -73,13 +75,13 @@ func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, er return r0, r1 } -// QuerySync provides a mock function with given fields: _a0 -func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) { - ret := _m.Called(_a0) +// QuerySync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseQuery - if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseQuery) @@ -87,8 +89,8 @@ func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index 329b0145b3..e8d10d1c8a 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -1,8 +1,10 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. 
package mocks import ( + context "context" + mock "github.com/stretchr/testify/mock" types "github.com/lazyledger/lazyledger-core/abci/types" @@ -13,13 +15,13 @@ type AppConnSnapshot struct { mock.Mock } -// ApplySnapshotChunkSync provides a mock function with given fields: _a0 -func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - ret := _m.Called(_a0) +// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseApplySnapshotChunk - if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) @@ -27,8 +29,8 @@ func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshot } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -50,13 +52,13 @@ func (_m *AppConnSnapshot) Error() error { return r0 } -// ListSnapshotsSync provides a mock function with given fields: _a0 -func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - ret := _m.Called(_a0) +// ListSnapshotsSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + ret := _m.Called(_a0, _a1) var r0 
*types.ResponseListSnapshots - if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseListSnapshots) @@ -64,8 +66,8 @@ func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*t } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -73,13 +75,13 @@ func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*t return r0, r1 } -// LoadSnapshotChunkSync provides a mock function with given fields: _a0 -func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - ret := _m.Called(_a0) +// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseLoadSnapshotChunk - if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) @@ -87,8 +89,8 @@ func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotCh } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0, _a1) 
} else { r1 = ret.Error(1) } @@ -96,13 +98,13 @@ func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotCh return r0, r1 } -// OfferSnapshotSync provides a mock function with given fields: _a0 -func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - ret := _m.Called(_a0) +// OfferSnapshotSync provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + ret := _m.Called(_a0, _a1) var r0 *types.ResponseOfferSnapshot - if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*types.ResponseOfferSnapshot) @@ -110,8 +112,8 @@ func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*t } var r1 error - if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go index 236f7c863d..425ac950a9 100644 --- a/proxy/multi_app_conn.go +++ b/proxy/multi_app_conn.go @@ -2,10 +2,11 @@ package proxy import ( "fmt" + "os" + "syscall" abcicli "github.com/lazyledger/lazyledger-core/abci/client" tmlog "github.com/lazyledger/lazyledger-core/libs/log" - tmos "github.com/lazyledger/lazyledger-core/libs/os" "github.com/lazyledger/lazyledger-core/libs/service" ) @@ -129,8 +130,7 @@ func (app *multiAppConn) killTMOnClientError() { logger.Error( fmt.Sprintf("%s connection terminated. Did the application crash? 
Please restart tendermint", conn), "err", err) - killErr := tmos.Kill() - if killErr != nil { + if killErr := kill(); killErr != nil { logger.Error("Failed to kill this process - please do so manually", "err", killErr) } } @@ -189,3 +189,12 @@ func (app *multiAppConn) abciClientFor(conn string) (abcicli.Client, error) { } return c, nil } + +func kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + + return p.Signal(syscall.SIGTERM) +} diff --git a/proxy/version.go b/proxy/version.go index d1ae829a23..e541a525ac 100644 --- a/proxy/version.go +++ b/proxy/version.go @@ -9,7 +9,8 @@ import ( // the abci.RequestInfo message during handshake with the app. // It contains only compile-time version information. var RequestInfo = abci.RequestInfo{ - Version: version.Version, + Version: version.TMCoreSemVer, BlockVersion: version.BlockProtocol, P2PVersion: version.P2PProtocol, + AbciVersion: version.ABCIVersion, } diff --git a/release_notes.md b/release_notes.md new file mode 100644 index 0000000000..a537871c58 --- /dev/null +++ b/release_notes.md @@ -0,0 +1 @@ + diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 4eb21e4aff..2570a71ada 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -42,7 +42,10 @@ func newEvidence(t *testing.T, val *privval.FilePV, vote2.Signature, err = val.Key.PrivKey.Sign(types.VoteSignBytes(chainID, v2)) require.NoError(t, err) - return types.NewDuplicateVoteEvidence(vote, vote2) + validator := types.NewValidator(val.Key.PubKey, 10) + valSet := types.NewValidatorSet([]*types.Validator{validator}) + + return types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet) } func makeEvidences( @@ -113,8 +116,9 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { var ( config = rpctest.GetConfig() chainID = config.ChainID() - pv = privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) ) + pv, err := 
privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + require.NoError(t, err) for i, c := range GetClients() { correct, fakes := makeEvidences(t, pv, chainID) diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index bf5d27f7a9..fda3d658c7 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. package mocks diff --git a/rpc/core/abci.go b/rpc/core/abci.go index b8bcf12255..39ae23234a 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -17,7 +17,7 @@ func ABCIQuery( height int64, prove bool, ) (*ctypes.ResultABCIQuery, error) { - resQuery, err := env.ProxyAppQuery.QuerySync(abci.RequestQuery{ + resQuery, err := env.ProxyAppQuery.QuerySync(ctx.Context(), abci.RequestQuery{ Path: path, Data: data, Height: height, @@ -33,7 +33,7 @@ func ABCIQuery( // ABCIInfo gets some info about the application. // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { - resInfo, err := env.ProxyAppQuery.InfoSync(proxy.RequestInfo) + resInfo, err := env.ProxyAppQuery.InfoSync(ctx.Context(), proxy.RequestInfo) if err != nil { return nil, err } diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index 9d6c587739..f7a875a345 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -10,7 +10,7 @@ import ( ) // BroadcastEvidence broadcasts evidence of the misbehavior. 
-// More: https://docs.tendermint.com/master/rpc/#/Info/broadcast_evidence +// More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { if ev == nil { return nil, errors.New("no evidence was provided") diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 69db980c1c..f5266fe9b1 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -20,7 +20,7 @@ import ( // CheckTx nor DeliverTx results. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{}) + err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{Context: ctx.Context()}) if err != nil { return nil, err @@ -35,7 +35,7 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas resCh := make(chan *abci.Response, 1) err := env.Mempool.CheckTx(tx, func(res *abci.Response) { resCh <- res - }, mempl.TxInfo{}) + }, mempl.TxInfo{Context: ctx.Context()}) if err != nil { return nil, err } @@ -81,7 +81,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc checkTxResCh := make(chan *abci.Response, 1) err = env.Mempool.CheckTx(tx, func(res *abci.Response) { checkTxResCh <- res - }, mempl.TxInfo{}) + }, mempl.TxInfo{Context: ctx.Context()}) if err != nil { env.Logger.Error("Error on broadcastTxCommit", "err", err) return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) @@ -159,7 +159,7 @@ func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, err // be added to the mempool either. 
// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx func CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - res, err := env.ProxyAppMempool.CheckTxSync(abci.RequestCheckTx{Tx: tx}) + res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err } diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 4a585c809d..cfa58676d6 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -1,9 +1,9 @@ package coregrpc import ( + "context" "net" - "golang.org/x/net/context" "google.golang.org/grpc" tmnet "github.com/lazyledger/lazyledger-core/libs/net" diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index b2ec66d595..6718414205 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -151,10 +151,13 @@ func NewWithHTTPClient(remote string, client *http.Client) (*Client, error) { } // Call issues a POST HTTP request. Requests are JSON encoded. Content-Type: -// text/json. -func (c *Client) Call(ctx context.Context, method string, - params map[string]interface{}, result interface{}) (interface{}, error) { - +// application/json. 
+func (c *Client) Call( + ctx context.Context, + method string, + params map[string]interface{}, + result interface{}, +) (interface{}, error) { id := c.nextRequestID() request, err := types.MapToRequest(id, method, params) @@ -172,14 +175,18 @@ func (c *Client) Call(ctx context.Context, method string, if err != nil { return nil, fmt.Errorf("request failed: %w", err) } - httpRequest.Header.Set("Content-Type", "text/json") + + httpRequest.Header.Set("Content-Type", "application/json") + if c.username != "" || c.password != "" { httpRequest.SetBasicAuth(c.username, c.password) } + httpResponse, err := c.client.Do(httpRequest) if err != nil { return nil, fmt.Errorf("post failed: %w", err) } + defer httpResponse.Body.Close() responseBytes, err := ioutil.ReadAll(httpResponse.Body) @@ -216,14 +223,18 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque if err != nil { return nil, fmt.Errorf("new request: %w", err) } - httpRequest.Header.Set("Content-Type", "text/json") + + httpRequest.Header.Set("Content-Type", "application/json") + if c.username != "" || c.password != "" { httpRequest.SetBasicAuth(c.username, c.password) } + httpResponse, err := c.client.Do(httpRequest) if err != nil { return nil, fmt.Errorf("post: %w", err) } + defer httpResponse.Body.Close() responseBytes, err := ioutil.ReadAll(httpResponse.Body) diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 2956b0fe52..7473b86203 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -1144,7 +1144,7 @@ paths: type: string example: "JSON_EVIDENCE_encoded" tags: - - Info + - Evidence description: | Broadcast evidence of the misbehavior. 
responses: diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index d401eac7f7..1e7c4e91c6 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -161,7 +161,10 @@ func NewTendermint(app abci.Application, opts *Options) *nm.Node { } pvKeyFile := config.PrivValidatorKeyFile() pvKeyStateFile := config.PrivValidatorStateFile() - pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) + pv, err := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) + if err != nil { + panic(err) + } papp := proxy.NewLocalClientCreator(app) nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) if err != nil { diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 0000000000..52348b635d --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +set -ue + +# Expect the following envvars to be set: +# - APP +# - VERSION +# - COMMIT +# - TARGET_OS +# - LEDGER_ENABLED +# - DEBUG + +# Source builder's functions library +. /usr/local/share/cosmos-sdk/buildlib.sh + +# These variables are now available +# - BASEDIR +# - OUTDIR + +# Build for each os-architecture pair +for platform in ${TARGET_PLATFORMS} ; do + # This function sets GOOS, GOARCH, and OS_FILE_EXT environment variables + # according to the build target platform. OS_FILE_EXT is empty in all + # cases except when the target platform is 'windows'. + setup_build_env_for_platform "${platform}" + + make clean + echo Building for $(go env GOOS)/$(go env GOARCH) >&2 + GOROOT_FINAL="$(go env GOROOT)" \ + make build LDFLAGS=-buildid=${VERSION} COMMIT=${COMMIT} + mv ./build/${APP}${OS_FILE_EXT} ${OUTDIR}/${APP}-${VERSION}-$(go env GOOS)-$(go env GOARCH)${OS_FILE_EXT} + + # This function restore the build environment variables to their + # original state. + restore_build_env +done + +# Generate and display build report. 
+generate_build_report +cat ${OUTDIR}/build_report diff --git a/scripts/dist.sh b/scripts/dist.sh index 81fdf9813f..2343804034 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -20,7 +20,7 @@ rm -rf build/pkg mkdir -p build/pkg # Get the git commit -GIT_COMMIT="$(git rev-parse --short=8 HEAD)" +VERSION="$(git describe --always)" GIT_IMPORT="github.com/tendermint/tendermint/version" # Determine the arch/os combos we're building for @@ -41,7 +41,7 @@ for arch in "${arch_list[@]}"; do for os in "${os_list[@]}"; do if [[ "$XC_EXCLUDE" != *" $os/$arch "* ]]; then echo "--> $os/$arch" - GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/tendermint" ./cmd/tendermint + GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.TMCoreSemVer=${VERSION}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/tendermint" ./cmd/tendermint fi done done diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py index 16647c05f4..bc446c7695 100644 --- a/scripts/linkify_changelog.py +++ b/scripts/linkify_changelog.py @@ -3,11 +3,11 @@ # This script goes through the provided file, and replaces any " \#", # with the valid mark down formatted link to it. e.g. -# " [\#number](https://github.com/tendermint/tendermint/issues/) -# Note that if the number is for a PR, github will auto-redirect you when you click the link. +# " [\#number](https://github.com/tendermint/tendermint/pull/) +# Note that if the number is for an issue, github will auto-redirect you when you click the link. # It is safe to run the script multiple times in succession. 
# # Example usage $ python3 linkify_changelog.py ../CHANGELOG_PENDING.md for line in fileinput.input(inplace=1): - line = re.sub(r"\s\\#([0-9]*)", r" [\\#\1](https://github.com/tendermint/tendermint/issues/\1)", line.rstrip()) + line = re.sub(r"\s\\#([0-9]*)", r" [\\#\1](https://github.com/tendermint/tendermint/pull/\1)", line.rstrip()) print(line) \ No newline at end of file diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 16f0ec733d..51b1cc6d33 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -2,20 +2,7 @@ set -eo pipefail -proto_dirs=$(find ./proto -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq) -for dir in $proto_dirs; do - buf protoc \ - -I "proto" \ - -I "third_party/proto" \ - --gogofaster_out=\ -Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\ -Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,\ -plugins=grpc,paths=source_relative:. \ - $(find "${dir}" -maxdepth 1 -name '*.proto') -done - -cp -r ./tendermint/* ./proto/* -rm -rf tendermint +buf generate --path proto/tendermint mv ./proto/tendermint/abci/types.pb.go ./abci/types diff --git a/scripts/publish.sh b/scripts/publish.sh deleted file mode 100755 index 7da299aafa..0000000000 --- a/scripts/publish.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -e - -VERSION=$1 -DIST_DIR=./build/dist - -# Get the version from the environment, or try to figure it out. -if [ -z $VERSION ]; then - VERSION=$(awk -F\" 'TMCoreSemVer =/ { print $2; exit }' < version/version.go) -fi -if [ -z "$VERSION" ]; then - echo "Please specify a version." - exit 1 -fi -echo "==> Copying ${DIST_DIR} to S3..." 
- -# copy to s3 -aws s3 cp --recursive ${DIST_DIR} s3://tendermint/binaries/tendermint/v${VERSION} --acl public-read - -exit 0 diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100755 index 8c40d36b6c..0000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the version from the environment, or try to figure it out. -if [ -z $VERSION ]; then - VERSION=$(awk -F\" 'TMCoreSemVer =/ { print $2; exit }' < version/version.go) -fi -if [ -z "$VERSION" ]; then - echo "Please specify a version." - exit 1 -fi -echo "==> Releasing version $VERSION..." - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. -cd "$DIR" - -# Building binaries -sh -c "'$DIR/scripts/dist.sh'" - -# Pushing binaries to S3 -sh -c "'$DIR/scripts/publish.sh'" - -# echo "==> Crafting a Github release" -# today=$(date +"%B-%d-%Y") -# ghr -b "https://github.com/tendermint/tendermint/blob/master/CHANGELOG.md#${VERSION//.}-${today,}" "v$VERSION" "$DIR/build/dist" - -# Build and push Docker image - -## Get SHA256SUM of the linux archive -SHA256SUM=$(shasum -a256 "${DIR}/build/dist/tendermint_${VERSION}_linux_amd64.zip" | awk '{print $1;}') - -## Replace TM_VERSION and TM_SHA256SUM with the new values -sed -i -e "s/TM_VERSION .*/TM_VERSION $VERSION/g" "$DIR/DOCKER/Dockerfile" -sed -i -e "s/TM_SHA256SUM .*/TM_SHA256SUM $SHA256SUM/g" "$DIR/DOCKER/Dockerfile" -git commit -m "update Dockerfile" -a "$DIR/DOCKER/Dockerfile" -echo "==> TODO: update DOCKER/README.md (latest Dockerfile's hash is $(git rev-parse HEAD)) and copy it's content to https://store.docker.com/community/images/tendermint/tendermint" - -pushd "$DIR/DOCKER" - -## Build Docker image -TAG=$VERSION sh -c "'./build.sh'" - -## Push Docker image -TAG=$VERSION sh -c "'./push.sh'" - 
-popd - -exit 0 diff --git a/scripts/release_management/README.md b/scripts/release_management/README.md deleted file mode 100644 index c0d49e2e87..0000000000 --- a/scripts/release_management/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# Release management scripts - -## Overview -The scripts in this folder are used for release management in CircleCI. Although the scripts are fully configurable using input parameters, -the default settings were modified to accommodate CircleCI execution. - -# Build scripts -These scripts help during the build process. They prepare the release files. - -## bump-semver.py -Bumps the semantic version of the input `--version`. Versions are expected in vMAJOR.MINOR.PATCH format or vMAJOR.MINOR format. - -In vMAJOR.MINOR format, the result will be patch version 0 of that version, for example `v1.2 -> v1.2.0`. - -In vMAJOR.MINOR.PATCH format, the result will be a bumped PATCH version, for example `v1.2.3 -> v1.2.4`. - -If the PATCH number contains letters, it is considered a development version, in which case, the result is the non-development version of that number. -The patch number will not be bumped, only the "-dev" or similar additional text will be removed. For example: `v1.2.6-rc1 -> v1.2.6`. - -## zip-file.py -Specialized ZIP command for release management. Special features: -1. Uses Python ZIP libaries, so the `zip` command does not need to be installed. -1. Can only zip one file. -1. Optionally gets file version, Go OS and architecture. -1. By default all inputs and output is formatted exactly how CircleCI needs it. - -By default, the command will try to ZIP the file at `build/tendermint_${GOOS}_${GOARCH}`. -This can be changed with the `--file` input parameter. - -By default, the command will output the ZIP file to `build/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. -This can be changed with the `--destination` (folder), `--version`, `--goos` and `--goarch` input parameters respectively. 
- -## sha-files.py -Specialized `shasum` command for release management. Special features: -1. Reads all ZIP files in the given folder. -1. By default all inputs and output is formatted exactly how CircleCI needs it. - -By default, the command will look up all ZIP files in the `build/` folder. - -By default, the command will output results into the `build/SHA256SUMS` file. - -# GitHub management -Uploading build results to GitHub requires at least these steps: -1. Create a new release on GitHub with content -2. Upload all binaries to the release -3. Publish the release -The below scripts help with these steps. - -## github-draft.py -Creates a GitHub release and fills the content with the CHANGELOG.md link. The version number can be changed by the `--version` parameter. - -By default, the command will use the tendermint/tendermint organization/repo, which can be changed using the `--org` and `--repo` parameters. - -By default, the command will get the version number from the `${CIRCLE_TAG}` variable. - -Returns the GitHub release ID. - -## github-upload.py -Upload a file to a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. - -By default, the command will upload the file `/tmp/workspace/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. This can be changed by the `--file` input parameter. - -## github-publish.py -Publish a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. diff --git a/scripts/release_management/bump-semver.py b/scripts/release_management/bump-semver.py deleted file mode 100755 index ce56d8d7c1..0000000000 --- a/scripts/release_management/bump-semver.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -# Bump the release number of a semantic version number and print it. --version is required. 
-# Version is -# - vA.B.C, in which case vA.B.C+1 will be returned -# - vA.B.C-devorwhatnot in which case vA.B.C will be returned -# - vA.B in which case vA.B.0 will be returned - -import re -import argparse -import sys - - -def semver(ver): - if re.match('v[0-9]+\.[0-9]+',ver) is None: - ver="v0.0" - #raise argparse.ArgumentTypeError('--version must be a semantic version number with major, minor and patch numbers') - return ver - - -def get_tendermint_version(): - """Extracts the current Tendermint version from version/version.go""" - pattern = re.compile(r"TMCoreSemVer = \"(?P([0-9.]+)+)\"") - with open("version/version.go", "rt") as version_file: - for line in version_file: - m = pattern.search(line) - if m: - return m.group('version') - - return None - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver) - args = parser.parse_args() - - found = re.match('(v[0-9]+\.[0-9]+)(\.(.+))?', args.version) - majorminorprefix = found.group(1) - patch = found.group(3) - if patch is None: - patch = "0-new" - - if re.match('[0-9]+$',patch) is None: - patchfound = re.match('([0-9]+)',patch) - patch = int(patchfound.group(1)) - else: - patch = int(patch) + 1 - - expected_version = "{0}.{1}".format(majorminorprefix, patch) - # if we're doing a release - if expected_version != "v0.0.0": - cur_version = get_tendermint_version() - if not cur_version: - print("Failed to obtain Tendermint version from version/version.go") - sys.exit(1) - expected_version_noprefix = expected_version.lstrip("v") - if expected_version_noprefix != "0.0.0" and expected_version_noprefix != cur_version: - print("Expected version/version.go#TMCoreSemVer to be {0}, but was {1}".format(expected_version_noprefix, cur_version)) - sys.exit(1) - - print(expected_version) diff --git a/scripts/release_management/github-draft.py b/scripts/release_management/github-draft.py deleted file mode 
100755 index 8a189d53e3..0000000000 --- a/scripts/release_management/github-draft.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python - -# Create a draft release on GitHub. By default in the tendermint/tendermint repo. -# Optimized for CircleCI - -import argparse -import httplib -import json -import os -from base64 import b64encode - -def request(org, repo, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3+json', - 'Authorization': 'Basic %s' % user_and_pass - } - - conn = httplib.HTTPSConnection('api.github.com', timeout=5) - conn.request('POST', '/repos/{0}/{1}/releases'.format(org,repo), data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print("{0}: {1}".format(response.status, response.reason)) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -def create_draft(org,repo,branch,version): - draft = { - 'tag_name': version, - 'target_commitish': '{0}'.format(branch), - 'name': '{0} (WARNING: ALPHA SOFTWARE)'.format(version), - 'body': 'https://github.com/{0}/{1}/blob/{2}/CHANGELOG.md#{3}'.format(org,repo,branch,version.replace('.','')), - 'draft': True, - 'prerelease': False - } - data=json.dumps(draft) - return request(org, repo, data) - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--org", default="tendermint", help="GitHub organization") - parser.add_argument("--repo", default="tendermint", help="GitHub repository") - parser.add_argument("--branch", default=os.environ.get('CIRCLE_BRANCH'), help="Branch to build from, e.g.: v1.0") - parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") - args = parser.parse_args() - - if not 
os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('environment variable GITHUB_USERNAME is required') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('environment variable GITHUB_TOKEN is required') - - release = create_draft(args.org,args.repo,args.branch,args.version) - - print(release["id"]) - diff --git a/scripts/release_management/github-openpr.py b/scripts/release_management/github-openpr.py deleted file mode 100755 index af0434f02d..0000000000 --- a/scripts/release_management/github-openpr.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -# Open a PR against the develop branch. --branch required. -# Optimized for CircleCI - -import json -import os -import argparse -import httplib -from base64 import b64encode - - -def request(org, repo, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3+json', - 'Authorization': 'Basic %s' % user_and_pass - } - - conn = httplib.HTTPSConnection('api.github.com', timeout=5) - conn.request('POST', '/repos/{0}/{1}/pulls'.format(org,repo), data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print(response) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--org", default="tendermint", help="GitHub organization. Defaults to tendermint.") - parser.add_argument("--repo", default="tendermint", help="GitHub repository. 
Defaults to tendermint.") - parser.add_argument("--head", help="The name of the branch where your changes are implemented.", required=True) - parser.add_argument("--base", help="The name of the branch you want the changes pulled into.", required=True) - parser.add_argument("--title", default="Security release {0}".format(os.environ.get('CIRCLE_TAG')), help="The title of the pull request.") - args = parser.parse_args() - - if not os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('GITHUB_USERNAME not set.') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('GITHUB_TOKEN not set.') - - if os.environ.get('CIRCLE_TAG') is None: - raise parser.error('CIRCLE_TAG not set.') - - result = request(args.org, args.repo, data=json.dumps({'title':"{0}".format(args.title),'head':"{0}".format(args.head),'base':"{0}".format(args.base),'body':""})) - print(result['html_url']) diff --git a/scripts/release_management/github-public-newbranch.bash b/scripts/release_management/github-public-newbranch.bash deleted file mode 100644 index ca2fa13141..0000000000 --- a/scripts/release_management/github-public-newbranch.bash +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -# github-public-newbranch.bash - create public branch from the security repository - -set -euo pipefail - -# Create new branch -BRANCH="${CIRCLE_TAG:-v0.0.0}-security-`date -u +%Y%m%d%H%M%S`" -# Check if the patch release exist already as a branch -if [ -n "`git branch | grep '${BRANCH}'`" ]; then - echo "WARNING: Branch ${BRANCH} already exists." -else - echo "Creating branch ${BRANCH}." - git branch "${BRANCH}" -fi - -# ... 
and check it out -git checkout "${BRANCH}" - -# Add entry to public repository -git remote add tendermint-origin git@github.com:tendermint/tendermint.git - -# Push branch and tag to public repository -git push tendermint-origin -git push tendermint-origin --tags - -# Create a PR from the public branch to the assumed release branch in public (release branch has to exist) -python -u scripts/release_management/github-openpr.py --head "${BRANCH}" --base "${BRANCH:%.*}" diff --git a/scripts/release_management/github-publish.py b/scripts/release_management/github-publish.py deleted file mode 100755 index 31071aecdb..0000000000 --- a/scripts/release_management/github-publish.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python - -# Publish an existing GitHub draft release. --id required. -# Optimized for CircleCI - -import json -import os -import argparse -import httplib -from base64 import b64encode - - -def request(org, repo, id, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3+json', - 'Authorization': 'Basic %s' % user_and_pass - } - - conn = httplib.HTTPSConnection('api.github.com', timeout=5) - conn.request('POST', '/repos/{0}/{1}/releases/{2}'.format(org,repo,id), data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print(response) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--org", default="tendermint", help="GitHub organization") - parser.add_argument("--repo", default="tendermint", help="GitHub repository") - parser.add_argument("--id", help="GitHub release ID", required=True, type=int) - parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), 
help="Version number for the release, e.g.: v1.0.0") - args = parser.parse_args() - - if not os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('GITHUB_USERNAME not set.') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('GITHUB_TOKEN not set.') - - try: - result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}".format(args.version)})) - except IOError as e: - print(e) - result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}-autorelease".format(args.version)})) - - print(result['name']) diff --git a/scripts/release_management/github-upload.py b/scripts/release_management/github-upload.py deleted file mode 100755 index 77c76a7554..0000000000 --- a/scripts/release_management/github-upload.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python - -# Upload a file to a GitHub draft release. --id and --file are required. -# Optimized for CircleCI - -import json -import os -import re -import argparse -import mimetypes -import httplib -from base64 import b64encode - - -def request(baseurl, path, mimetype, mimeencoding, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3.raw+json', - 'Authorization': 'Basic %s' % user_and_pass, - 'Content-Type': mimetype, - 'Content-Encoding': mimeencoding - } - - conn = httplib.HTTPSConnection(baseurl, timeout=5) - conn.request('POST', path, data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print(response) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--id", help="GitHub release ID", required=True, type=int) - parser.add_argument("--file", 
default="/tmp/workspace/tendermint_{0}_{1}_{2}.zip".format(os.environ.get('CIRCLE_TAG'),os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to upload") - parser.add_argument("--return-id-only", help="Return only the release ID after upload to GitHub.", action='store_true') - args = parser.parse_args() - - if not os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('GITHUB_USERNAME not set.') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('GITHUB_TOKEN not set.') - - mimetypes.init() - filename = os.path.basename(args.file) - mimetype,mimeencoding = mimetypes.guess_type(filename, strict=False) - if mimetype is None: - mimetype = 'application/zip' - if mimeencoding is None: - mimeencoding = 'utf8' - - with open(args.file,'rb') as f: - asset = f.read() - - result = request('uploads.github.com', '/repos/tendermint/tendermint/releases/{0}/assets?name={1}'.format(args.id, filename), mimetype, mimeencoding, asset) - - if args.return_id_only: - print(result['id']) - else: - print(result['browser_download_url']) - diff --git a/scripts/release_management/sha-files.py b/scripts/release_management/sha-files.py deleted file mode 100755 index 2a9ee0d594..0000000000 --- a/scripts/release_management/sha-files.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python - -# Create SHA256 summaries from all ZIP files in a folder -# Optimized for CircleCI - -import re -import os -import argparse -import zipfile -import hashlib - - -BLOCKSIZE = 65536 - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--folder", default="/tmp/workspace", help="Folder to look for, for ZIP files") - parser.add_argument("--shafile", default="/tmp/workspace/SHA256SUMS", help="SHA256 summaries File") - args = parser.parse_args() - - for filename in os.listdir(args.folder): - if re.search('\.zip$',filename) is None: - continue - if not os.path.isfile(os.path.join(args.folder, filename)): - continue - with open(args.shafile,'a+') as 
shafile: - hasher = hashlib.sha256() - with open(os.path.join(args.folder, filename),'r') as f: - buf = f.read(BLOCKSIZE) - while len(buf) > 0: - hasher.update(buf) - buf = f.read(BLOCKSIZE) - shafile.write("{0} {1}\n".format(hasher.hexdigest(),filename)) - diff --git a/scripts/release_management/zip-file.py b/scripts/release_management/zip-file.py deleted file mode 100755 index 9785df6642..0000000000 --- a/scripts/release_management/zip-file.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python - -# ZIP one file as "tendermint" into a ZIP like tendermint_VERSION_OS_ARCH.zip -# Use environment variables CIRCLE_TAG, GOOS and GOARCH for easy input parameters. -# Optimized for CircleCI - -import os -import argparse -import zipfile -import hashlib - - -BLOCKSIZE = 65536 - - -def zip_asset(file,destination,arcname,version,goos,goarch): - filename = os.path.basename(file) - output = "{0}/{1}_{2}_{3}_{4}.zip".format(destination,arcname,version,goos,goarch) - - with zipfile.ZipFile(output,'w') as f: - f.write(filename=file,arcname=arcname) - f.comment=filename - return output - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--file", default="build/lazyledger-core_{0}_{1}".format(os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to zip") - parser.add_argument("--destination", default="build", help="Destination folder for files") - parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") - parser.add_argument("--goos", default=os.environ.get('GOOS'), help="GOOS parameter") - parser.add_argument("--goarch", default=os.environ.get('GOARCH'), help="GOARCH parameter") - args = parser.parse_args() - - if args.version is None: - raise parser.error("argument --version is required") - if args.goos is None: - raise parser.error("argument --goos is required") - if args.goarch is None: - raise parser.error("argument --goarch is required") - - file = 
zip_asset(args.file,args.destination,"lazyledger-core",args.version,args.goos,args.goarch) - print(file) - diff --git a/state/execution.go b/state/execution.go index 60a119acf3..2666b3f9c0 100644 --- a/state/execution.go +++ b/state/execution.go @@ -1,6 +1,7 @@ package state import ( + "context" "errors" "fmt" "time" @@ -125,7 +126,10 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // https://github.com/lazyledger/lazyledger-specs/blob/53e5f350838f1e0785ad670704bf91dac2f4f5a3/specs/block_proposer.md#deciding-on-a-block-size // Here, we instead assume a fixed (max) square size instead. // 2. feed them into MakeBlock below: - processedBlockTxs, err := blockExec.proxyApp.PreprocessTxsSync(abci.RequestPreprocessTxs{Txs: bzs}) + processedBlockTxs, err := blockExec.proxyApp.PreprocessTxsSync( + context.Background(), + abci.RequestPreprocessTxs{Txs: bzs}, + ) if err != nil { // The App MUST ensure that only valid (and hence 'processable') // Tx enter the mempool. Hence, at this point, we can't have any non-processable @@ -159,7 +163,11 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // Validation does not mutate state, but does require historical information from the stateDB, // ie. to verify evidence from a validator at an old height. 
func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { - return validateBlock(blockExec.evpool, state, block) + err := validateBlock(state, block) + if err != nil { + return err + } + return blockExec.evpool.CheckEvidence(block.Evidence.Evidence) } // ApplyBlock validates the block against the state, executes it against the app, @@ -172,16 +180,13 @@ func (blockExec *BlockExecutor) ApplyBlock( state State, blockID types.BlockID, block *types.Block, ) (State, int64, error) { - if err := blockExec.ValidateBlock(state, block); err != nil { + if err := validateBlock(state, block); err != nil { return state, 0, ErrInvalidBlock(err) } - // Update evpool with the block and state and get any byzantine validators for that block - byzVals := blockExec.evpool.ABCIEvidence(block.Height, block.Evidence.Evidence) - startTime := time.Now().UnixNano() abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, - blockExec.store, state.InitialHeight, byzVals) + blockExec.store, state.InitialHeight) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { @@ -224,7 +229,7 @@ func (blockExec *BlockExecutor) ApplyBlock( } // Update evpool with the latest state. 
- blockExec.evpool.Update(state) + blockExec.evpool.Update(state, block.Evidence.Evidence) fail.Fail() // XXX @@ -266,7 +271,7 @@ func (blockExec *BlockExecutor) Commit( } // Commit block, get hash back - res, err := blockExec.proxyApp.CommitSync() + res, err := blockExec.proxyApp.CommitSync(context.Background()) if err != nil { blockExec.logger.Error( "Client error during proxyAppConn.CommitSync", @@ -306,7 +311,6 @@ func execBlockOnProxyApp( block *types.Block, store Store, initialHeight int64, - byzVals []abci.Evidence, ) (*tmstate.ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 @@ -336,18 +340,27 @@ func execBlockOnProxyApp( commitInfo := getBeginBlockValidatorInfo(block, store, initialHeight) + byzVals := make([]abci.Evidence, 0) + for _, evidence := range block.Evidence.Evidence { + byzVals = append(byzVals, evidence.ABCI()...) + } + + ctx := context.Background() + // Begin block var err error pbh := block.Header.ToProto() if pbh == nil { return nil, errors.New("nil header") } - abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ - Hash: block.Hash(), - Header: *pbh, - LastCommitInfo: commitInfo, - ByzantineValidators: byzVals, - }) + abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync( + ctx, + abci.RequestBeginBlock{ + Hash: block.Hash(), + Header: *pbh, + LastCommitInfo: commitInfo, + ByzantineValidators: byzVals, + }) if err != nil { logger.Error("Error in proxyAppConn.BeginBlock", "err", err) return nil, err @@ -355,14 +368,14 @@ func execBlockOnProxyApp( // Run txs of block. for _, tx := range block.Txs { - proxyAppConn.DeliverTxAsync(abci.RequestDeliverTx{Tx: tx}) - if err := proxyAppConn.Error(); err != nil { + _, err = proxyAppConn.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: tx}) + if err != nil { return nil, err } } // End block. 
- abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{Height: block.Height}) + abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(ctx, abci.RequestEndBlock{Height: block.Height}) if err != nil { logger.Error("Error in proxyAppConn.EndBlock", "err", err) return nil, err @@ -570,13 +583,13 @@ func ExecCommitBlock( store Store, initialHeight int64, ) ([]byte, error) { - _, err := execBlockOnProxyApp(logger, appConnConsensus, block, store, initialHeight, []abci.Evidence{}) + _, err := execBlockOnProxyApp(logger, appConnConsensus, block, store, initialHeight) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err } // Commit block, get hash back - res, err := appConnConsensus.CommitSync() + res, err := appConnConsensus.CommitSync(context.Background()) if err != nil { logger.Error("Client error during proxyAppConn.CommitSync", "err", res) return nil, err diff --git a/state/execution_test.go b/state/execution_test.go index 9af707d457..b6ea333d14 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -10,16 +10,20 @@ import ( "github.com/stretchr/testify/require" abci "github.com/lazyledger/lazyledger-core/abci/types" + "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/ed25519" cryptoenc "github.com/lazyledger/lazyledger-core/crypto/encoding" + "github.com/lazyledger/lazyledger-core/crypto/tmhash" "github.com/lazyledger/lazyledger-core/libs/log" mmock "github.com/lazyledger/lazyledger-core/mempool/mock" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" + tmversion "github.com/lazyledger/lazyledger-core/proto/tendermint/version" "github.com/lazyledger/lazyledger-core/proxy" sm "github.com/lazyledger/lazyledger-core/state" "github.com/lazyledger/lazyledger-core/state/mocks" 
"github.com/lazyledger/lazyledger-core/types" tmtime "github.com/lazyledger/lazyledger-core/types/time" + "github.com/lazyledger/lazyledger-core/version" ) var ( @@ -126,10 +130,52 @@ func TestBeginBlockByzantineValidators(t *testing.T) { require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, _ := makeState(1, 1) + state, stateDB, privVals := makeState(1, 1) stateStore := sm.NewStore(stateDB) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + privVal := privVals[state.Validators.Validators[0].Address.String()] + blockID := makeBlockID([]byte("headerhash"), 1000, []byte("partshash")) + header := &types.Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 1}, + ChainID: state.ChainID, + Height: 10, + Time: defaultEvidenceTime, + LastBlockID: blockID, + LastCommitHash: crypto.CRandBytes(tmhash.Size), + DataHash: crypto.CRandBytes(tmhash.Size), + ValidatorsHash: state.Validators.Hash(), + NextValidatorsHash: state.Validators.Hash(), + ConsensusHash: crypto.CRandBytes(tmhash.Size), + AppHash: crypto.CRandBytes(tmhash.Size), + LastResultsHash: crypto.CRandBytes(tmhash.Size), + EvidenceHash: crypto.CRandBytes(tmhash.Size), + ProposerAddress: crypto.CRandBytes(crypto.AddressSize), + } + + // we don't need to worry about validating the evidence as long as they pass validate basic + dve := types.NewMockDuplicateVoteEvidenceWithValidator(3, defaultEvidenceTime, privVal, state.ChainID) + dve.ValidatorPower = 1000 + lcae := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: header, + Commit: types.NewCommit(10, 0, makeBlockID(header.Hash(), 100, []byte("partshash")), []types.CommitSig{{ + BlockIDFlag: types.BlockIDFlagNil, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + Timestamp: defaultEvidenceTime, + Signature: crypto.CRandBytes(types.MaxSignatureSize), + }}), + }, + ValidatorSet: 
state.Validators, + }, + CommonHeight: 8, + ByzantineValidators: []*types.Validator{state.Validators.Validators[0]}, + TotalVotingPower: 12, + Timestamp: defaultEvidenceTime, + } + + ev := []types.Evidence{dve, lcae} abciEv := []abci.Evidence{ { @@ -137,7 +183,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { Height: 3, Time: defaultEvidenceTime, Validator: types.TM2PB.Validator(state.Validators.Validators[0]), - TotalVotingPower: 33, + TotalVotingPower: 10, }, { Type: abci.EvidenceType_LIGHT_CLIENT_ATTACK, @@ -149,15 +195,17 @@ func TestBeginBlockByzantineValidators(t *testing.T) { } evpool := &mocks.EvidencePool{} - evpool.On("ABCIEvidence", mock.AnythingOfType("int64"), mock.AnythingOfType("[]types.Evidence")).Return(abciEv) - evpool.On("Update", mock.AnythingOfType("state.State")).Return() + evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return(ev, int64(100)) + evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, evpool) block := makeBlock(state, 1) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block.Evidence = types.EvidenceData{Evidence: ev} + block.Header.EvidenceHash = block.Evidence.Hash() + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) @@ -401,3 +449,19 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { assert.NotNil(t, err) assert.NotEmpty(t, state.NextValidators.Validators) } + +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { + var ( + h = make([]byte, tmhash.Size) + psH = make([]byte, tmhash.Size) + ) + copy(h, hash) + copy(psH, 
partSetHash) + return types.BlockID{ + Hash: h, + PartSetHeader: types.PartSetHeader{ + Total: partSetSize, + Hash: psH, + }, + } +} diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index 1a40964474..62ef74b0e8 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -1,12 +1,10 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. package mocks import ( - abcitypes "github.com/lazyledger/lazyledger-core/abci/types" - mock "github.com/stretchr/testify/mock" - state "github.com/lazyledger/lazyledger-core/state" + mock "github.com/stretchr/testify/mock" types "github.com/lazyledger/lazyledger-core/types" ) @@ -16,22 +14,6 @@ type EvidencePool struct { mock.Mock } -// ABCIEvidence provides a mock function with given fields: _a0, _a1 -func (_m *EvidencePool) ABCIEvidence(_a0 int64, _a1 []types.Evidence) []abcitypes.Evidence { - ret := _m.Called(_a0, _a1) - - var r0 []abcitypes.Evidence - if rf, ok := ret.Get(0).(func(int64, []types.Evidence) []abcitypes.Evidence); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]abcitypes.Evidence) - } - } - - return r0 -} - // AddEvidence provides a mock function with given fields: _a0 func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { ret := _m.Called(_a0) @@ -83,7 +65,7 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64 return r0, r1 } -// Update provides a mock function with given fields: _a0 -func (_m *EvidencePool) Update(_a0 state.State) { - _m.Called(_a0) +// Update provides a mock function with given fields: _a0, _a1 +func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { + _m.Called(_a0, _a1) } diff --git a/state/mocks/store.go b/state/mocks/store.go index 0214028189..5a33aae9e7 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -1,4 +1,4 @@ -// Code generated by mockery 
v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. package mocks diff --git a/state/services.go b/state/services.go index 70cb91f644..eef7dc854d 100644 --- a/state/services.go +++ b/state/services.go @@ -1,9 +1,6 @@ package state import ( - "time" - - abci "github.com/lazyledger/lazyledger-core/abci/types" "github.com/lazyledger/lazyledger-core/types" ) @@ -45,9 +42,8 @@ type BlockStore interface { type EvidencePool interface { PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) AddEvidence(types.Evidence) error - Update(State) + Update(State, types.EvidenceList) CheckEvidence(types.EvidenceList) error - ABCIEvidence(int64, []types.Evidence) []abci.Evidence } // EmptyEvidencePool is an empty implementation of EvidencePool, useful for testing. It also complies @@ -58,11 +54,8 @@ func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []types.Evidence, s return nil, 0 } func (EmptyEvidencePool) AddEvidence(types.Evidence) error { return nil } -func (EmptyEvidencePool) Update(State) {} +func (EmptyEvidencePool) Update(State, types.EvidenceList) {} func (EmptyEvidencePool) CheckEvidence(evList types.EvidenceList) error { return nil } -func (EmptyEvidencePool) ABCIEvidence(int64, []types.Evidence) []abci.Evidence { - return []abci.Evidence{} -} -func (EmptyEvidencePool) AddEvidenceFromConsensus(types.Evidence, time.Time, *types.ValidatorSet) error { +func (EmptyEvidencePool) AddEvidenceFromConsensus(evidence types.Evidence) error { return nil } diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 81744e4afe..1ec8277a97 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -25,8 +25,8 @@ func TestTxFilter(t *testing.T) { tx types.Tx isErr bool }{ - {types.Tx(tmrand.Bytes(2181)), false}, - {types.Tx(tmrand.Bytes(2188)), true}, + {types.Tx(tmrand.Bytes(2149)), false}, + {types.Tx(tmrand.Bytes(2150)), true}, {types.Tx(tmrand.Bytes(3000)), true}, } diff --git a/state/validation.go 
b/state/validation.go index b14eb4f2fb..c116063f81 100644 --- a/state/validation.go +++ b/state/validation.go @@ -11,7 +11,7 @@ import ( //----------------------------------------------------- // Validate block -func validateBlock(evidencePool EvidencePool, state State, block *types.Block) error { +func validateBlock(state State, block *types.Block) error { // Validate internal consistency. if err := block.ValidateBasic(); err != nil { return err @@ -141,6 +141,5 @@ func validateBlock(evidencePool EvidencePool, state State, block *types.Block) e return types.NewErrEvidenceOverflow(max, got) } - // Validate all evidence. - return evidencePool.CheckEvidence(block.Evidence.Evidence) + return nil } diff --git a/state/validation_test.go b/state/validation_test.go index 42e3acdfb0..e5518366c2 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -237,7 +237,7 @@ func TestValidateBlockEvidence(t *testing.T) { evpool := &mocks.EvidencePool{} evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) - evpool.On("Update", mock.AnythingOfType("state.State")).Return() + evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() evpool.On("ABCIEvidence", mock.AnythingOfType("int64"), mock.AnythingOfType("[]types.Evidence")).Return( []abci.Evidence{}) diff --git a/statesync/chunks.go b/statesync/chunks.go index 5e6e4a3239..96f007a781 100644 --- a/statesync/chunks.go +++ b/statesync/chunks.go @@ -22,7 +22,7 @@ type chunk struct { Format uint32 Index uint32 Chunk []byte - Sender p2p.ID + Sender p2p.PeerID } // chunkQueue manages chunks for a state sync process, ordering them if requested. 
It acts as an @@ -33,7 +33,7 @@ type chunkQueue struct { snapshot *snapshot // if this is nil, the queue has been closed dir string // temp dir for on-disk chunk storage chunkFiles map[uint32]string // path to temporary chunk file - chunkSenders map[uint32]p2p.ID // the peer who sent the given chunk + chunkSenders map[uint32]p2p.PeerID // the peer who sent the given chunk chunkAllocated map[uint32]bool // chunks that have been allocated via Allocate() chunkReturned map[uint32]bool // chunks returned via Next() waiters map[uint32][]chan<- uint32 // signals WaitFor() waiters about chunk arrival @@ -49,11 +49,12 @@ func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { if snapshot.Chunks == 0 { return nil, errors.New("snapshot has no chunks") } + return &chunkQueue{ snapshot: snapshot, dir: dir, chunkFiles: make(map[uint32]string, snapshot.Chunks), - chunkSenders: make(map[uint32]p2p.ID, snapshot.Chunks), + chunkSenders: make(map[uint32]p2p.PeerID, snapshot.Chunks), chunkAllocated: make(map[uint32]bool, snapshot.Chunks), chunkReturned: make(map[uint32]bool, snapshot.Chunks), waiters: make(map[uint32][]chan<- uint32), @@ -65,8 +66,10 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { if chunk == nil || chunk.Chunk == nil { return false, errors.New("cannot add nil chunk") } + q.Lock() defer q.Unlock() + if q.snapshot == nil { return false, nil // queue is closed } @@ -88,6 +91,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { if err != nil { return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) } + q.chunkFiles[chunk.Index] = path q.chunkSenders[chunk.Index] = chunk.Sender @@ -96,6 +100,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { waiter <- chunk.Index close(waiter) } + delete(q.waiters, chunk.Index) return true, nil @@ -106,18 +111,22 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { func (q *chunkQueue) Allocate() (uint32, error) { q.Lock() defer q.Unlock() + if 
q.snapshot == nil { return 0, errDone } + if uint32(len(q.chunkAllocated)) >= q.snapshot.Chunks { return 0, errDone } + for i := uint32(0); i < q.snapshot.Chunks; i++ { if !q.chunkAllocated[i] { q.chunkAllocated[i] = true return i, nil } } + return 0, errDone } @@ -125,20 +134,24 @@ func (q *chunkQueue) Allocate() (uint32, error) { func (q *chunkQueue) Close() error { q.Lock() defer q.Unlock() + if q.snapshot == nil { return nil } + for _, waiters := range q.waiters { for _, waiter := range waiters { close(waiter) } } + q.waiters = nil q.snapshot = nil - err := os.RemoveAll(q.dir) - if err != nil { + + if err := os.RemoveAll(q.dir); err != nil { return fmt.Errorf("failed to clean up state sync tempdir %v: %w", q.dir, err) } + return nil } @@ -156,40 +169,46 @@ func (q *chunkQueue) discard(index uint32) error { if q.snapshot == nil { return nil } + path := q.chunkFiles[index] if path == "" { return nil } - err := os.Remove(path) - if err != nil { + + if err := os.Remove(path); err != nil { return fmt.Errorf("failed to remove chunk %v: %w", index, err) } + delete(q.chunkFiles, index) delete(q.chunkReturned, index) delete(q.chunkAllocated, index) + return nil } // DiscardSender discards all *unreturned* chunks from a given sender. If the caller wants to // discard already returned chunks, this can be done via Discard(). -func (q *chunkQueue) DiscardSender(peerID p2p.ID) error { +func (q *chunkQueue) DiscardSender(peerID p2p.PeerID) error { q.Lock() defer q.Unlock() for index, sender := range q.chunkSenders { - if sender == peerID && !q.chunkReturned[index] { + if sender.Equal(peerID) && !q.chunkReturned[index] { err := q.discard(index) if err != nil { return err } + delete(q.chunkSenders, index) } } + return nil } -// GetSender returns the sender of the chunk with the given index, or empty if not found. -func (q *chunkQueue) GetSender(index uint32) p2p.ID { +// GetSender returns the sender of the chunk with the given index, or empty if +// not found. 
+func (q *chunkQueue) GetSender(index uint32) p2p.PeerID { q.Lock() defer q.Unlock() return q.chunkSenders[index] @@ -209,10 +228,12 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) { if !ok { return nil, nil } + body, err := ioutil.ReadFile(path) if err != nil { return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) } + return &chunk{ Height: q.snapshot.Height, Format: q.snapshot.Format, @@ -226,6 +247,7 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) { // blocks until the chunk is available. Concurrent Next() calls may return the same chunk. func (q *chunkQueue) Next() (*chunk, error) { q.Lock() + var chunk *chunk index, err := q.nextUp() if err == nil { @@ -234,7 +256,9 @@ func (q *chunkQueue) Next() (*chunk, error) { q.chunkReturned[index] = true } } + q.Unlock() + if chunk != nil || err != nil { return chunk, err } @@ -250,10 +274,12 @@ func (q *chunkQueue) Next() (*chunk, error) { q.Lock() defer q.Unlock() + chunk, err = q.load(index) if err != nil { return nil, err } + q.chunkReturned[index] = true return chunk, nil } @@ -264,11 +290,13 @@ func (q *chunkQueue) nextUp() (uint32, error) { if q.snapshot == nil { return 0, errDone } + for i := uint32(0); i < q.snapshot.Chunks; i++ { if !q.chunkReturned[i] { return i, nil } } + return 0, errDone } @@ -290,9 +318,11 @@ func (q *chunkQueue) RetryAll() { func (q *chunkQueue) Size() uint32 { q.Lock() defer q.Unlock() + if q.snapshot == nil { return 0 } + return q.snapshot.Chunks } @@ -302,20 +332,26 @@ func (q *chunkQueue) Size() uint32 { func (q *chunkQueue) WaitFor(index uint32) <-chan uint32 { q.Lock() defer q.Unlock() + ch := make(chan uint32, 1) switch { case q.snapshot == nil: close(ch) + case index >= q.snapshot.Chunks: close(ch) + case q.chunkFiles[index] != "": ch <- index close(ch) + default: if q.waiters[index] == nil { q.waiters[index] = make([]chan<- uint32, 0) } + q.waiters[index] = append(q.waiters[index], ch) } + return ch } diff --git a/statesync/chunks_test.go 
b/statesync/chunks_test.go index ecf50c767f..d9c3b8d891 100644 --- a/statesync/chunks_test.go +++ b/statesync/chunks_test.go @@ -274,7 +274,7 @@ func TestChunkQueue_DiscardSender(t *testing.T) { defer teardown() // Allocate and add all chunks to the queue - senders := []p2p.ID{"a", "b", "c"} + senders := []p2p.PeerID{p2p.PeerID("a"), p2p.PeerID("b"), p2p.PeerID("c")} for i := uint32(0); i < queue.Size(); i++ { _, err := queue.Allocate() require.NoError(t, err) @@ -295,14 +295,14 @@ func TestChunkQueue_DiscardSender(t *testing.T) { } // Discarding an unknown sender should do nothing - err := queue.DiscardSender("x") + err := queue.DiscardSender(p2p.PeerID("x")) require.NoError(t, err) _, err = queue.Allocate() assert.Equal(t, errDone, err) // Discarding sender b should discard chunk 4, but not chunk 1 which has already been // returned. - err = queue.DiscardSender("b") + err = queue.DiscardSender(p2p.PeerID("b")) require.NoError(t, err) index, err := queue.Allocate() require.NoError(t, err) @@ -315,21 +315,24 @@ func TestChunkQueue_GetSender(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: p2p.ID("a")}) + peerAID := p2p.PeerID{0xaa} + peerBID := p2p.PeerID{0xbb} + + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: peerAID}) require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{2}, Sender: p2p.ID("b")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{2}, Sender: peerBID}) require.NoError(t, err) - assert.EqualValues(t, "a", queue.GetSender(0)) - assert.EqualValues(t, "b", queue.GetSender(1)) - assert.EqualValues(t, "", queue.GetSender(2)) + assert.Equal(t, "aa", queue.GetSender(0).String()) + assert.Equal(t, "bb", queue.GetSender(1).String()) + assert.Equal(t, "", queue.GetSender(2).String()) // After the chunk has been processed, we should still know who 
the sender was chunk, err := queue.Next() require.NoError(t, err) require.NotNil(t, chunk) require.EqualValues(t, 0, chunk.Index) - assert.EqualValues(t, "a", queue.GetSender(0)) + assert.Equal(t, "aa", queue.GetSender(0).String()) } func TestChunkQueue_Next(t *testing.T) { @@ -351,7 +354,7 @@ func TestChunkQueue_Next(t *testing.T) { }() assert.Empty(t, chNext) - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}) + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.PeerID("b")}) require.NoError(t, err) select { case <-chNext: @@ -359,17 +362,17 @@ func TestChunkQueue_Next(t *testing.T) { default: } - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.PeerID("a")}) require.NoError(t, err) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}, + &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.PeerID("a")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}, + &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.PeerID("b")}, <-chNext) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.PeerID("e")}) require.NoError(t, err) select { case <-chNext: @@ -377,19 +380,19 @@ func TestChunkQueue_Next(t *testing.T) { default: } - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.ID("c")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.PeerID("c")}) require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: 
[]byte{3, 1, 3}, Sender: p2p.ID("d")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.PeerID("d")}) require.NoError(t, err) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.ID("c")}, + &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.PeerID("c")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.ID("d")}, + &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.PeerID("d")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}, + &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.PeerID("e")}, <-chNext) _, ok := <-chNext diff --git a/statesync/messages.go b/statesync/messages.go deleted file mode 100644 index 7804be810f..0000000000 --- a/statesync/messages.go +++ /dev/null @@ -1,97 +0,0 @@ -package statesync - -import ( - "errors" - "fmt" - - "github.com/gogo/protobuf/proto" - - ssproto "github.com/lazyledger/lazyledger-core/proto/tendermint/statesync" -) - -const ( - // snapshotMsgSize is the maximum size of a snapshotResponseMessage - snapshotMsgSize = int(4e6) - // chunkMsgSize is the maximum size of a chunkResponseMessage - chunkMsgSize = int(16e6) -) - -// mustEncodeMsg encodes a Protobuf message, panicing on error. 
-func mustEncodeMsg(pb proto.Message) []byte { - msg := ssproto.Message{} - switch pb := pb.(type) { - case *ssproto.ChunkRequest: - msg.Sum = &ssproto.Message_ChunkRequest{ChunkRequest: pb} - case *ssproto.ChunkResponse: - msg.Sum = &ssproto.Message_ChunkResponse{ChunkResponse: pb} - case *ssproto.SnapshotsRequest: - msg.Sum = &ssproto.Message_SnapshotsRequest{SnapshotsRequest: pb} - case *ssproto.SnapshotsResponse: - msg.Sum = &ssproto.Message_SnapshotsResponse{SnapshotsResponse: pb} - default: - panic(fmt.Errorf("unknown message type %T", pb)) - } - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -// decodeMsg decodes a Protobuf message. -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &ssproto.Message{} - err := proto.Unmarshal(bz, pb) - if err != nil { - return nil, err - } - switch msg := pb.Sum.(type) { - case *ssproto.Message_ChunkRequest: - return msg.ChunkRequest, nil - case *ssproto.Message_ChunkResponse: - return msg.ChunkResponse, nil - case *ssproto.Message_SnapshotsRequest: - return msg.SnapshotsRequest, nil - case *ssproto.Message_SnapshotsResponse: - return msg.SnapshotsResponse, nil - default: - return nil, fmt.Errorf("unknown message type %T", msg) - } -} - -// validateMsg validates a message. 
-func validateMsg(pb proto.Message) error { - if pb == nil { - return errors.New("message cannot be nil") - } - switch msg := pb.(type) { - case *ssproto.ChunkRequest: - if msg.Height == 0 { - return errors.New("height cannot be 0") - } - case *ssproto.ChunkResponse: - if msg.Height == 0 { - return errors.New("height cannot be 0") - } - if msg.Missing && len(msg.Chunk) > 0 { - return errors.New("missing chunk cannot have contents") - } - if !msg.Missing && msg.Chunk == nil { - return errors.New("chunk cannot be nil") - } - case *ssproto.SnapshotsRequest: - case *ssproto.SnapshotsResponse: - if msg.Height == 0 { - return errors.New("height cannot be 0") - } - if len(msg.Hash) == 0 { - return errors.New("snapshot has no hash") - } - if msg.Chunks == 0 { - return errors.New("snapshot has no chunks") - } - default: - return fmt.Errorf("unknown message type %T", msg) - } - return nil -} diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go index b2824b02eb..c86bd750b1 100644 --- a/statesync/mocks/state_provider.go +++ b/statesync/mocks/state_provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.3.0. DO NOT EDIT. +// Code generated by mockery v2.4.0-beta. DO NOT EDIT. 
package mocks diff --git a/statesync/reactor.go b/statesync/reactor.go index ae1c3b43db..070482b7c5 100644 --- a/statesync/reactor.go +++ b/statesync/reactor.go @@ -1,11 +1,15 @@ package statesync import ( + "context" "errors" + "fmt" "sort" "time" abci "github.com/lazyledger/lazyledger-core/abci/types" + "github.com/lazyledger/lazyledger-core/libs/log" + "github.com/lazyledger/lazyledger-core/libs/service" tmsync "github.com/lazyledger/lazyledger-core/libs/sync" "github.com/lazyledger/lazyledger-core/p2p" ssproto "github.com/lazyledger/lazyledger-core/proto/tendermint/statesync" @@ -14,211 +18,435 @@ import ( "github.com/lazyledger/lazyledger-core/types" ) +var ( + _ service.Service = (*Reactor)(nil) + _ p2p.Wrapper = (*ssproto.Message)(nil) + + // ChannelShims contains a map of ChannelDescriptorShim objects, where each + // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding + // p2p proto.Message the new p2p Channel is responsible for handling. + // + // + // TODO: Remove once p2p refactor is complete. + // ref: https://github.com/tendermint/tendermint/issues/5670 + ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ + SnapshotChannel: { + MsgType: new(ssproto.Message), + Descriptor: &p2p.ChannelDescriptor{ + ID: byte(SnapshotChannel), + Priority: 3, + SendQueueCapacity: 10, + RecvMessageCapacity: snapshotMsgSize, + }, + }, + ChunkChannel: { + MsgType: new(ssproto.Message), + Descriptor: &p2p.ChannelDescriptor{ + ID: byte(ChunkChannel), + Priority: 1, + SendQueueCapacity: 4, + RecvMessageCapacity: chunkMsgSize, + }, + }, + } +) + const ( // SnapshotChannel exchanges snapshot metadata - SnapshotChannel = byte(0x60) + SnapshotChannel = p2p.ChannelID(0x60) + // ChunkChannel exchanges chunk contents - ChunkChannel = byte(0x61) + ChunkChannel = p2p.ChannelID(0x61) + // recentSnapshots is the number of recent snapshots to send and receive per peer. 
recentSnapshots = 10 + + // snapshotMsgSize is the maximum size of a snapshotResponseMessage + snapshotMsgSize = int(4e6) + + // chunkMsgSize is the maximum size of a chunkResponseMessage + chunkMsgSize = int(16e6) ) // Reactor handles state sync, both restoring snapshots for the local node and serving snapshots // for other nodes. type Reactor struct { - p2p.BaseReactor + service.BaseService - conn proxy.AppConnSnapshot - connQuery proxy.AppConnQuery - tempDir string + conn proxy.AppConnSnapshot + connQuery proxy.AppConnQuery + tempDir string + snapshotCh *p2p.Channel + chunkCh *p2p.Channel + peerUpdates *p2p.PeerUpdatesCh + closeCh chan struct{} - // This will only be set when a state sync is in progress. It is used to feed received - // snapshots and chunks into the sync. + // This will only be set when a state sync is in progress. It is used to feed + // received snapshots and chunks into the sync. mtx tmsync.RWMutex syncer *syncer } -// NewReactor creates a new state sync reactor. -func NewReactor(conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, tempDir string) *Reactor { +// NewReactor returns a reference to a new state sync reactor, which implements +// the service.Service interface. It accepts a logger, connections for snapshots +// and querying, references to p2p Channels and a channel to listen for peer +// updates on. Note, the reactor will close all p2p Channels when stopping. +func NewReactor( + logger log.Logger, + conn proxy.AppConnSnapshot, + connQuery proxy.AppConnQuery, + snapshotCh, chunkCh *p2p.Channel, + peerUpdates *p2p.PeerUpdatesCh, + tempDir string, +) *Reactor { r := &Reactor{ - conn: conn, - connQuery: connQuery, + conn: conn, + connQuery: connQuery, + snapshotCh: snapshotCh, + chunkCh: chunkCh, + peerUpdates: peerUpdates, + closeCh: make(chan struct{}), + tempDir: tempDir, } - r.BaseReactor = *p2p.NewBaseReactor("StateSync", r) - return r -} -// GetChannels implements p2p.Reactor. 
-func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: SnapshotChannel, - Priority: 3, - SendQueueCapacity: 10, - RecvMessageCapacity: snapshotMsgSize, - }, - { - ID: ChunkChannel, - Priority: 1, - SendQueueCapacity: 4, - RecvMessageCapacity: chunkMsgSize, - }, - } + r.BaseService = *service.NewBaseService(logger, "StateSync", r) + return r } -// OnStart implements p2p.Reactor. +// OnStart starts separate go routines for each p2p Channel and listens for +// envelopes on each. In addition, it also listens for peer updates and handles +// messages on that p2p channel accordingly. The caller must be sure to execute +// OnStop to ensure the outbound p2p Channels are closed. No error is returned. func (r *Reactor) OnStart() error { + // Listen for envelopes on the snapshot p2p Channel in a separate go-routine + // as to not block or cause IO contention with the chunk p2p Channel. Note, + // we do not launch a go-routine to handle individual envelopes as to not + // have to deal with bounding workers or pools. + go r.processSnapshotCh() + + // Listen for envelopes on the chunk p2p Channel in a separate go-routine + // as to not block or cause IO contention with the snapshot p2p Channel. Note, + // we do not launch a go-routine to handle individual envelopes as to not + // have to deal with bounding workers or pools. + go r.processChunkCh() + + go r.processPeerUpdates() + return nil } -// AddPeer implements p2p.Reactor. -func (r *Reactor) AddPeer(peer p2p.Peer) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.syncer != nil { - r.syncer.AddPeer(peer) - } -} +// OnStop stops the reactor by signaling to all spawned goroutines to exit and +// blocking until they all exit. +func (r *Reactor) OnStop() { + // Close closeCh to signal to all spawned goroutines to gracefully exit. All + // p2p Channels should execute Close(). + close(r.closeCh) -// RemovePeer implements p2p.Reactor. 
-func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.syncer != nil { - r.syncer.RemovePeer(peer) - } + // Wait for all p2p Channels to be closed before returning. This ensures we + // can easily reason about synchronization of all p2p Channels and ensure no + // panics will occur. + <-r.snapshotCh.Done() + <-r.chunkCh.Done() + <-r.peerUpdates.Done() } -// Receive implements p2p.Reactor. -func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - if !r.IsRunning() { - return - } - - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) - r.Switch.StopPeerForError(src, err) - return - } - err = validateMsg(msg) - if err != nil { - r.Logger.Error("Invalid message", "peer", src, "msg", msg, "err", err) - r.Switch.StopPeerForError(src, err) - return - } +// handleSnapshotMessage handles enevelopes sent from peers on the +// SnapshotChannel. It returns an error only if the Envelope.Message is unknown +// for this channel. This should never be called outside of handleMessage. 
+func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { + switch msg := envelope.Message.(type) { + case *ssproto.SnapshotsRequest: + snapshots, err := r.recentSnapshots(recentSnapshots) + if err != nil { + r.Logger.Error("failed to fetch snapshots", "err", err) + return nil + } - switch chID { - case SnapshotChannel: - switch msg := msg.(type) { - case *ssproto.SnapshotsRequest: - snapshots, err := r.recentSnapshots(recentSnapshots) - if err != nil { - r.Logger.Error("Failed to fetch snapshots", "err", err) - return - } - for _, snapshot := range snapshots { - r.Logger.Debug("Advertising snapshot", "height", snapshot.Height, - "format", snapshot.Format, "peer", src.ID()) - src.Send(chID, mustEncodeMsg(&ssproto.SnapshotsResponse{ + for _, snapshot := range snapshots { + r.Logger.Debug( + "advertising snapshot", + "height", snapshot.Height, + "format", snapshot.Format, + "peer", envelope.From.String(), + ) + r.snapshotCh.Out() <- p2p.Envelope{ + To: envelope.From, + Message: &ssproto.SnapshotsResponse{ Height: snapshot.Height, Format: snapshot.Format, Chunks: snapshot.Chunks, Hash: snapshot.Hash, Metadata: snapshot.Metadata, - })) + }, } + } - case *ssproto.SnapshotsResponse: - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.syncer == nil { - r.Logger.Debug("Received unexpected snapshot, no state sync in progress") - return - } - r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", src.ID()) - _, err := r.syncer.AddSnapshot(src, &snapshot{ - Height: msg.Height, - Format: msg.Format, - Chunks: msg.Chunks, - Hash: msg.Hash, - Metadata: msg.Metadata, - }) - if err != nil { - r.Logger.Error("Failed to add snapshot", "height", msg.Height, "format", msg.Format, - "peer", src.ID(), "err", err) - return - } + case *ssproto.SnapshotsResponse: + r.mtx.RLock() + defer r.mtx.RUnlock() - default: - r.Logger.Error("Received unknown message %T", msg) + if r.syncer == nil { + r.Logger.Debug("received unexpected snapshot; no state sync 
in progress") + return nil } - case ChunkChannel: - switch msg := msg.(type) { - case *ssproto.ChunkRequest: - r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "peer", src.ID()) - resp, err := r.conn.LoadSnapshotChunkSync(abci.RequestLoadSnapshotChunk{ - Height: msg.Height, - Format: msg.Format, - Chunk: msg.Index, - }) - if err != nil { - r.Logger.Error("Failed to load chunk", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "err", err) - return - } - r.Logger.Debug("Sending chunk", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "peer", src.ID()) - src.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkResponse{ + r.Logger.Debug( + "received snapshot", + "height", msg.Height, + "format", msg.Format, + "peer", envelope.From.String(), + ) + _, err := r.syncer.AddSnapshot(envelope.From, &snapshot{ + Height: msg.Height, + Format: msg.Format, + Chunks: msg.Chunks, + Hash: msg.Hash, + Metadata: msg.Metadata, + }) + if err != nil { + r.Logger.Error( + "failed to add snapshot", + "height", msg.Height, + "format", msg.Format, + "err", err, + "channel", r.snapshotCh.ID, + ) + return nil + } + + default: + r.Logger.Error("received unknown message", "msg", msg, "peer", envelope.From.String()) + return fmt.Errorf("received unknown message: %T", msg) + } + + return nil +} + +// handleChunkMessage handles enevelopes sent from peers on the ChunkChannel. +// It returns an error only if the Envelope.Message is unknown for this channel. +// This should never be called outside of handleMessage. 
+func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { + switch msg := envelope.Message.(type) { + case *ssproto.ChunkRequest: + r.Logger.Debug( + "received chunk request", + "height", msg.Height, + "format", msg.Format, + "chunk", msg.Index, + "peer", envelope.From.String(), + ) + resp, err := r.conn.LoadSnapshotChunkSync(context.Background(), abci.RequestLoadSnapshotChunk{ + Height: msg.Height, + Format: msg.Format, + Chunk: msg.Index, + }) + if err != nil { + r.Logger.Error( + "failed to load chunk", + "height", msg.Height, + "format", msg.Format, + "chunk", msg.Index, + "err", err, + "peer", envelope.From.String(), + ) + return nil + } + + r.Logger.Debug( + "sending chunk", + "height", msg.Height, + "format", msg.Format, + "chunk", msg.Index, + "peer", envelope.From.String(), + ) + r.chunkCh.Out() <- p2p.Envelope{ + To: envelope.From, + Message: &ssproto.ChunkResponse{ Height: msg.Height, Format: msg.Format, Index: msg.Index, Chunk: resp.Chunk, Missing: resp.Chunk == nil, - })) - - case *ssproto.ChunkResponse: - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.syncer == nil { - r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", src.ID()) - return + }, + } + + case *ssproto.ChunkResponse: + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer == nil { + r.Logger.Debug("received unexpected chunk; no state sync in progress", "peer", envelope.From.String()) + return nil + } + + r.Logger.Debug( + "received chunk; adding to sync", + "height", msg.Height, + "format", msg.Format, + "chunk", msg.Index, + "peer", envelope.From.String(), + ) + _, err := r.syncer.AddChunk(&chunk{ + Height: msg.Height, + Format: msg.Format, + Index: msg.Index, + Chunk: msg.Chunk, + Sender: envelope.From, + }) + if err != nil { + r.Logger.Error( + "failed to add chunk", + "height", msg.Height, + "format", msg.Format, + "chunk", msg.Index, + "err", err, + "peer", envelope.From.String(), + ) + return nil + } + + default: + r.Logger.Error("received unknown 
message", "msg", msg, "peer", envelope.From.String()) + return fmt.Errorf("received unknown message: %T", msg) + } + + return nil +} + +// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. +// It will handle errors and any possible panics gracefully. A caller can handle +// any error returned by sending a PeerError on the respective channel. +func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("panic in processing message: %v", e) + r.Logger.Error("recovering from processing message panic", "err", err) + } + }() + + switch chID { + case SnapshotChannel: + err = r.handleSnapshotMessage(envelope) + + case ChunkChannel: + err = r.handleChunkMessage(envelope) + + default: + err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + } + + return err +} + +// processSnapshotCh initiates a blocking process where we listen for and handle +// envelopes on the SnapshotChannel. Any error encountered during message +// execution will result in a PeerError being sent on the SnapshotChannel. When +// the reactor is stopped, we will catch the singal and close the p2p Channel +// gracefully. 
+func (r *Reactor) processSnapshotCh() { + defer r.snapshotCh.Close() + + for { + select { + case envelope := <-r.snapshotCh.In(): + if err := r.handleMessage(r.snapshotCh.ID(), envelope); err != nil { + r.snapshotCh.Error() <- p2p.PeerError{ + PeerID: envelope.From, + Err: err, + Severity: p2p.PeerErrorSeverityLow, + } } - r.Logger.Debug("Received chunk, adding to sync", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "peer", src.ID()) - _, err := r.syncer.AddChunk(&chunk{ - Height: msg.Height, - Format: msg.Format, - Index: msg.Index, - Chunk: msg.Chunk, - Sender: src.ID(), - }) - if err != nil { - r.Logger.Error("Failed to add chunk", "height", msg.Height, "format", msg.Format, - "chunk", msg.Index, "err", err) - return + + case <-r.closeCh: + r.Logger.Debug("stopped listening on snapshot channel; closing...") + return + } + } +} + +// processChunkCh initiates a blocking process where we listen for and handle +// envelopes on the ChunkChannel. Any error encountered during message +// execution will result in a PeerError being sent on the ChunkChannel. When +// the reactor is stopped, we will catch the singal and close the p2p Channel +// gracefully. +func (r *Reactor) processChunkCh() { + defer r.chunkCh.Close() + + for { + select { + case envelope := <-r.chunkCh.In(): + if err := r.handleMessage(r.chunkCh.ID(), envelope); err != nil { + r.chunkCh.Error() <- p2p.PeerError{ + PeerID: envelope.From, + Err: err, + Severity: p2p.PeerErrorSeverityLow, + } } - default: - r.Logger.Error("Received unknown message %T", msg) + case <-r.closeCh: + r.Logger.Debug("stopped listening on chunk channel; closing...") + return } + } +} - default: - r.Logger.Error("Received message on invalid channel %x", chID) +// processPeerUpdate processes a PeerUpdate, returning an error upon failing to +// handle the PeerUpdate or if a panic is recovered. 
+func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("panic in processing peer update: %v", e) + r.Logger.Error("recovering from processing peer update panic", "err", err) + } + }() + + r.Logger.Debug("received peer update", "peer", peerUpdate.PeerID.String(), "status", peerUpdate.Status) + + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil { + switch peerUpdate.Status { + case p2p.PeerStatusNew, p2p.PeerStatusUp: + r.syncer.AddPeer(peerUpdate.PeerID) + + case p2p.PeerStatusDown, p2p.PeerStatusRemoved, p2p.PeerStatusBanned: + r.syncer.RemovePeer(peerUpdate.PeerID) + } + } + + return err +} + +// processPeerUpdates initiates a blocking process where we listen for and handle +// PeerUpdate messages. When the reactor is stopped, we will catch the singal and +// close the p2p PeerUpdatesCh gracefully. +func (r *Reactor) processPeerUpdates() { + defer r.peerUpdates.Close() + + for { + select { + case peerUpdate := <-r.peerUpdates.Updates(): + _ = r.processPeerUpdate(peerUpdate) + + case <-r.closeCh: + r.Logger.Debug("stopped listening on peer updates channel; closing...") + return + } } } // recentSnapshots fetches the n most recent snapshots from the app func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { - resp, err := r.conn.ListSnapshotsSync(abci.RequestListSnapshots{}) + resp, err := r.conn.ListSnapshotsSync(context.Background(), abci.RequestListSnapshots{}) if err != nil { return nil, err } + sort.Slice(resp.Snapshots, func(i, j int) bool { a := resp.Snapshots[i] b := resp.Snapshots[j] + switch { case a.Height > b.Height: return true @@ -228,11 +456,13 @@ func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { return false } }) + snapshots := make([]*snapshot, 0, n) for i, s := range resp.Snapshots { if i >= recentSnapshots { break } + snapshots = append(snapshots, &snapshot{ Height: s.Height, Format: s.Format, @@ -241,6 +471,7 @@ func 
(r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { Metadata: s.Metadata, }) } + return snapshots, nil } @@ -252,16 +483,22 @@ func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration) r.mtx.Unlock() return sm.State{}, nil, errors.New("a state sync is already in progress") } - r.syncer = newSyncer(r.Logger, r.conn, r.connQuery, stateProvider, r.tempDir) + + r.syncer = newSyncer(r.Logger, r.conn, r.connQuery, stateProvider, r.snapshotCh.Out(), r.chunkCh.Out(), r.tempDir) r.mtx.Unlock() - // Request snapshots from all currently connected peers - r.Logger.Debug("Requesting snapshots from known peers") - r.Switch.Broadcast(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})) + // request snapshots from all currently connected peers + r.Logger.Debug("requesting snapshots from known peers") + r.snapshotCh.Out() <- p2p.Envelope{ + Broadcast: true, + Message: &ssproto.SnapshotsRequest{}, + } state, commit, err := r.syncer.SyncAny(discoveryTime) + r.mtx.Lock() r.syncer = nil r.mtx.Unlock() + return state, commit, err } diff --git a/statesync/reactor_test.go b/statesync/reactor_test.go index 02831a1156..dcbfccc60b 100644 --- a/statesync/reactor_test.go +++ b/statesync/reactor_test.go @@ -1,21 +1,136 @@ package statesync import ( + "context" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" abci "github.com/lazyledger/lazyledger-core/abci/types" + "github.com/lazyledger/lazyledger-core/libs/log" "github.com/lazyledger/lazyledger-core/p2p" - p2pmocks "github.com/lazyledger/lazyledger-core/p2p/mocks" ssproto "github.com/lazyledger/lazyledger-core/proto/tendermint/statesync" proxymocks "github.com/lazyledger/lazyledger-core/proxy/mocks" + "github.com/lazyledger/lazyledger-core/statesync/mocks" ) -func TestReactor_Receive_ChunkRequest(t *testing.T) { +type reactorTestSuite struct { 
+ reactor *Reactor + syncer *syncer + + conn *proxymocks.AppConnSnapshot + connQuery *proxymocks.AppConnQuery + stateProvider *mocks.StateProvider + + snapshotChannel *p2p.Channel + snapshotInCh chan p2p.Envelope + snapshotOutCh chan p2p.Envelope + snapshotPeerErrCh chan p2p.PeerError + + chunkChannel *p2p.Channel + chunkInCh chan p2p.Envelope + chunkOutCh chan p2p.Envelope + chunkPeerErrCh chan p2p.PeerError + + peerUpdates *p2p.PeerUpdatesCh +} + +func setup( + t *testing.T, + conn *proxymocks.AppConnSnapshot, + connQuery *proxymocks.AppConnQuery, + stateProvider *mocks.StateProvider, + chBuf uint, +) *reactorTestSuite { + t.Helper() + + if conn == nil { + conn = &proxymocks.AppConnSnapshot{} + } + if connQuery == nil { + connQuery = &proxymocks.AppConnQuery{} + } + if stateProvider == nil { + stateProvider = &mocks.StateProvider{} + } + + rts := &reactorTestSuite{ + snapshotInCh: make(chan p2p.Envelope, chBuf), + snapshotOutCh: make(chan p2p.Envelope, chBuf), + snapshotPeerErrCh: make(chan p2p.PeerError, chBuf), + chunkInCh: make(chan p2p.Envelope, chBuf), + chunkOutCh: make(chan p2p.Envelope, chBuf), + chunkPeerErrCh: make(chan p2p.PeerError, chBuf), + peerUpdates: p2p.NewPeerUpdates(), + conn: conn, + connQuery: connQuery, + stateProvider: stateProvider, + } + + rts.snapshotChannel = p2p.NewChannel( + SnapshotChannel, + new(ssproto.Message), + rts.snapshotInCh, + rts.snapshotOutCh, + rts.snapshotPeerErrCh, + ) + + rts.chunkChannel = p2p.NewChannel( + ChunkChannel, + new(ssproto.Message), + rts.chunkInCh, + rts.chunkOutCh, + rts.chunkPeerErrCh, + ) + + rts.reactor = NewReactor( + log.NewNopLogger(), + conn, + connQuery, + rts.snapshotChannel, + rts.chunkChannel, + rts.peerUpdates, + "", + ) + + rts.syncer = newSyncer( + log.NewNopLogger(), + conn, + connQuery, + stateProvider, + rts.snapshotOutCh, + rts.chunkOutCh, + "", + ) + + require.NoError(t, rts.reactor.Start()) + require.True(t, rts.reactor.IsRunning()) + + t.Cleanup(func() { + require.NoError(t, 
rts.reactor.Stop()) + require.False(t, rts.reactor.IsRunning()) + }) + + return rts +} + +func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { + rts := setup(t, nil, nil, nil, 2) + + rts.chunkInCh <- p2p.Envelope{ + From: p2p.PeerID{0xAA}, + Message: &ssproto.SnapshotsRequest{}, + } + + response := <-rts.chunkPeerErrCh + require.Error(t, response.Err) + require.Empty(t, rts.chunkOutCh) + require.Contains(t, response.Err.Error(), "received unknown message") + require.Equal(t, p2p.PeerID{0xAA}, response.PeerID) +} + +func TestReactor_ChunkRequest(t *testing.T) { testcases := map[string]struct { request *ssproto.ChunkRequest chunk []byte @@ -24,62 +139,69 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { "chunk is returned": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, []byte{1, 2, 3}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}}, - "empty chunk is returned, as nil": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}, + }, + "empty chunk is returned, as empty": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, []byte{}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}}, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{}}, + }, "nil (missing) chunk is returned as missing": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, nil, &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, }, + "invalid request": { + &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + nil, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, + }, } for name, tc := range testcases { tc := tc + t.Run(name, func(t *testing.T) { - // Mock ABCI connection to return local snapshots + // mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("LoadSnapshotChunkSync", abci.RequestLoadSnapshotChunk{ + conn.On("LoadSnapshotChunkSync", context.Background(), 
abci.RequestLoadSnapshotChunk{ Height: tc.request.Height, Format: tc.request.Format, Chunk: tc.request.Index, }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) - // Mock peer to store response, if found - peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) - var response *ssproto.ChunkResponse - if tc.expectResponse != nil { - peer.On("Send", ChunkChannel, mock.Anything).Run(func(args mock.Arguments) { - msg, err := decodeMsg(args[1].([]byte)) - require.NoError(t, err) - response = msg.(*ssproto.ChunkResponse) - }).Return(true) - } + rts := setup(t, conn, nil, nil, 2) - // Start a reactor and send a ssproto.ChunkRequest, then wait for and check response - r := NewReactor(conn, nil, "") - err := r.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := r.Stop(); err != nil { - t.Error(err) - } - }) + rts.chunkInCh <- p2p.Envelope{ + From: p2p.PeerID{0xAA}, + Message: tc.request, + } - r.Receive(ChunkChannel, peer, mustEncodeMsg(tc.request)) - time.Sleep(100 * time.Millisecond) - assert.Equal(t, tc.expectResponse, response) + response := <-rts.chunkOutCh + require.Equal(t, tc.expectResponse, response.Message) + require.Empty(t, rts.chunkOutCh) conn.AssertExpectations(t) - peer.AssertExpectations(t) }) } } -func TestReactor_Receive_SnapshotsRequest(t *testing.T) { +func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { + rts := setup(t, nil, nil, nil, 2) + + rts.snapshotInCh <- p2p.Envelope{ + From: p2p.PeerID{0xAA}, + Message: &ssproto.ChunkRequest{}, + } + + response := <-rts.snapshotPeerErrCh + require.Error(t, response.Err) + require.Empty(t, rts.snapshotOutCh) + require.Contains(t, response.Err.Error(), "received unknown message") + require.Equal(t, p2p.PeerID{0xAA}, response.PeerID) +} + +func TestReactor_SnapshotsRequest(t *testing.T) { testcases := map[string]struct { snapshots []*abci.Snapshot expectResponses []*ssproto.SnapshotsResponse @@ -117,41 +239,48 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) 
{ for name, tc := range testcases { tc := tc + t.Run(name, func(t *testing.T) { - // Mock ABCI connection to return local snapshots + // mock ABCI connection to return local snapshots conn := &proxymocks.AppConnSnapshot{} - conn.On("ListSnapshotsSync", abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + conn.On("ListSnapshotsSync", context.Background(), abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ Snapshots: tc.snapshots, }, nil) - // Mock peer to catch responses and store them in a slice - responses := []*ssproto.SnapshotsResponse{} - peer := &p2pmocks.Peer{} - if len(tc.expectResponses) > 0 { - peer.On("ID").Return(p2p.ID("id")) - peer.On("Send", SnapshotChannel, mock.Anything).Run(func(args mock.Arguments) { - msg, err := decodeMsg(args[1].([]byte)) - require.NoError(t, err) - responses = append(responses, msg.(*ssproto.SnapshotsResponse)) - }).Return(true) + rts := setup(t, conn, nil, nil, 100) + + rts.snapshotInCh <- p2p.Envelope{ + From: p2p.PeerID{0xAA}, + Message: &ssproto.SnapshotsRequest{}, } - // Start a reactor and send a SnapshotsRequestMessage, then wait for and check responses - r := NewReactor(conn, nil, "") - err := r.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := r.Stop(); err != nil { - t.Error(err) - } - }) + if len(tc.expectResponses) > 0 { + retryUntil(t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) + } - r.Receive(SnapshotChannel, peer, mustEncodeMsg(&ssproto.SnapshotsRequest{})) - time.Sleep(100 * time.Millisecond) - assert.Equal(t, tc.expectResponses, responses) + responses := make([]*ssproto.SnapshotsResponse, len(tc.expectResponses)) + for i := 0; i < len(tc.expectResponses); i++ { + e := <-rts.snapshotOutCh + responses[i] = e.Message.(*ssproto.SnapshotsResponse) + } - conn.AssertExpectations(t) - peer.AssertExpectations(t) + require.Equal(t, tc.expectResponses, responses) + require.Empty(t, rts.snapshotOutCh) }) } } + +// retryUntil will 
continue to evaluate fn and will return successfully when true +// or fail when the timeout is reached. +func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + for { + if fn() { + return + } + + require.NoError(t, ctx.Err()) + } +} diff --git a/statesync/snapshots.go b/statesync/snapshots.go index 5bb9eeb928..77d6b4d24f 100644 --- a/statesync/snapshots.go +++ b/statesync/snapshots.go @@ -1,6 +1,7 @@ package statesync import ( + "bytes" "context" "crypto/sha256" "fmt" @@ -46,16 +47,16 @@ type snapshotPool struct { tmsync.Mutex snapshots map[snapshotKey]*snapshot - snapshotPeers map[snapshotKey]map[p2p.ID]p2p.Peer + snapshotPeers map[snapshotKey]map[string]p2p.PeerID // indexes for fast searches formatIndex map[uint32]map[snapshotKey]bool heightIndex map[uint64]map[snapshotKey]bool - peerIndex map[p2p.ID]map[snapshotKey]bool + peerIndex map[string]map[snapshotKey]bool // blacklists for rejected items formatBlacklist map[uint32]bool - peerBlacklist map[p2p.ID]bool + peerBlacklist map[string]bool snapshotBlacklist map[snapshotKey]bool } @@ -64,20 +65,21 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool { return &snapshotPool{ stateProvider: stateProvider, snapshots: make(map[snapshotKey]*snapshot), - snapshotPeers: make(map[snapshotKey]map[p2p.ID]p2p.Peer), + snapshotPeers: make(map[snapshotKey]map[string]p2p.PeerID), formatIndex: make(map[uint32]map[snapshotKey]bool), heightIndex: make(map[uint64]map[snapshotKey]bool), - peerIndex: make(map[p2p.ID]map[snapshotKey]bool), + peerIndex: make(map[string]map[snapshotKey]bool), formatBlacklist: make(map[uint32]bool), - peerBlacklist: make(map[p2p.ID]bool), + peerBlacklist: make(map[string]bool), snapshotBlacklist: make(map[snapshotKey]bool), } } -// Add adds a snapshot to the pool, unless the peer has already sent recentSnapshots snapshots. It -// returns true if this was a new, non-blacklisted snapshot. 
The snapshot height is verified using -// the light client, and the expected app hash is set for the snapshot. -func (p *snapshotPool) Add(peer p2p.Peer, snapshot *snapshot) (bool, error) { +// Add adds a snapshot to the pool, unless the peer has already sent recentSnapshots +// snapshots. It returns true if this was a new, non-blacklisted snapshot. The +// snapshot height is verified using the light client, and the expected app hash +// is set for the snapshot. +func (p *snapshotPool) Add(peer p2p.PeerID, snapshot *snapshot) (bool, error) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -94,23 +96,23 @@ func (p *snapshotPool) Add(peer p2p.Peer, snapshot *snapshot) (bool, error) { switch { case p.formatBlacklist[snapshot.Format]: return false, nil - case p.peerBlacklist[peer.ID()]: + case p.peerBlacklist[peer.String()]: return false, nil case p.snapshotBlacklist[key]: return false, nil - case len(p.peerIndex[peer.ID()]) >= recentSnapshots: + case len(p.peerIndex[peer.String()]) >= recentSnapshots: return false, nil } if p.snapshotPeers[key] == nil { - p.snapshotPeers[key] = make(map[p2p.ID]p2p.Peer) + p.snapshotPeers[key] = make(map[string]p2p.PeerID) } - p.snapshotPeers[key][peer.ID()] = peer + p.snapshotPeers[key][peer.String()] = peer - if p.peerIndex[peer.ID()] == nil { - p.peerIndex[peer.ID()] = make(map[snapshotKey]bool) + if p.peerIndex[peer.String()] == nil { + p.peerIndex[peer.String()] = make(map[snapshotKey]bool) } - p.peerIndex[peer.ID()][key] = true + p.peerIndex[peer.String()][key] = true if p.snapshots[key] != nil { return false, nil @@ -140,7 +142,7 @@ func (p *snapshotPool) Best() *snapshot { } // GetPeer returns a random peer for a snapshot, if any. 
-func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.Peer { +func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.PeerID { peers := p.GetPeers(snapshot) if len(peers) == 0 { return nil @@ -149,19 +151,22 @@ func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.Peer { } // GetPeers returns the peers for a snapshot. -func (p *snapshotPool) GetPeers(snapshot *snapshot) []p2p.Peer { +func (p *snapshotPool) GetPeers(snapshot *snapshot) []p2p.PeerID { key := snapshot.Key() + p.Lock() defer p.Unlock() - peers := make([]p2p.Peer, 0, len(p.snapshotPeers[key])) + peers := make([]p2p.PeerID, 0, len(p.snapshotPeers[key])) for _, peer := range p.snapshotPeers[key] { peers = append(peers, peer) } + // sort results, for testability (otherwise order is random, so tests randomly fail) sort.Slice(peers, func(a int, b int) bool { - return peers[a].ID() < peers[b].ID() + return bytes.Compare(peers[a], peers[b]) < 0 }) + return peers } @@ -222,33 +227,35 @@ func (p *snapshotPool) RejectFormat(format uint32) { } // RejectPeer rejects a peer. It will never be used again. -func (p *snapshotPool) RejectPeer(peerID p2p.ID) { - if peerID == "" { +func (p *snapshotPool) RejectPeer(peerID p2p.PeerID) { + if len(peerID) == 0 { return } + p.Lock() defer p.Unlock() p.removePeer(peerID) - p.peerBlacklist[peerID] = true + p.peerBlacklist[peerID.String()] = true } // RemovePeer removes a peer from the pool, and any snapshots that no longer have peers. -func (p *snapshotPool) RemovePeer(peerID p2p.ID) { +func (p *snapshotPool) RemovePeer(peerID p2p.PeerID) { p.Lock() defer p.Unlock() p.removePeer(peerID) } // removePeer removes a peer. The caller must hold the mutex lock. 
-func (p *snapshotPool) removePeer(peerID p2p.ID) { - for key := range p.peerIndex[peerID] { - delete(p.snapshotPeers[key], peerID) +func (p *snapshotPool) removePeer(peerID p2p.PeerID) { + for key := range p.peerIndex[peerID.String()] { + delete(p.snapshotPeers[key], peerID.String()) if len(p.snapshotPeers[key]) == 0 { p.removeSnapshot(key) } } - delete(p.peerIndex, peerID) + + delete(p.peerIndex, peerID.String()) } // removeSnapshot removes a snapshot. The caller must hold the mutex lock. diff --git a/statesync/snapshots_test.go b/statesync/snapshots_test.go index 15696e9df1..7f6f203801 100644 --- a/statesync/snapshots_test.go +++ b/statesync/snapshots_test.go @@ -3,12 +3,10 @@ package statesync import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/lazyledger/lazyledger-core/p2p" - p2pmocks "github.com/lazyledger/lazyledger-core/p2p/mocks" "github.com/lazyledger/lazyledger-core/statesync/mocks" ) @@ -35,7 +33,7 @@ func TestSnapshot_Key(t *testing.T) { before := s.Key() tc.modify(&s) after := s.Key() - assert.NotEqual(t, before, after) + require.NotEqual(t, before, after) }) } } @@ -44,36 +42,34 @@ func TestSnapshotPool_Add(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, uint64(1)).Return([]byte("app_hash"), nil) - peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) + peerID := p2p.PeerID{0xAA} // Adding to the pool should work pool := newSnapshotPool(stateProvider) - added, err := pool.Add(peer, &snapshot{ + added, err := pool.Add(peerID, &snapshot{ Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}, }) require.NoError(t, err) - assert.True(t, added) + require.True(t, added) // Adding again from a different peer should return false - otherPeer := &p2pmocks.Peer{} - otherPeer.On("ID").Return(p2p.ID("other")) - added, err = pool.Add(peer, &snapshot{ + otherPeerID := 
p2p.PeerID{0xBB} + added, err = pool.Add(otherPeerID, &snapshot{ Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}, }) require.NoError(t, err) - assert.False(t, added) + require.False(t, added) // The pool should have populated the snapshot with the trusted app hash snapshot := pool.Best() require.NotNil(t, snapshot) - assert.Equal(t, []byte("app_hash"), snapshot.trustedAppHash) + require.Equal(t, []byte("app_hash"), snapshot.trustedAppHash) stateProvider.AssertExpectations(t) } @@ -84,16 +80,17 @@ func TestSnapshotPool_GetPeer(t *testing.T) { pool := newSnapshotPool(stateProvider) s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} - peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) - peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) - _, err := pool.Add(peerA, s) + peerAID := p2p.PeerID{0xAA} + peerBID := p2p.PeerID{0xBB} + + _, err := pool.Add(peerAID, s) require.NoError(t, err) - _, err = pool.Add(peerB, s) + + _, err = pool.Add(peerBID, s) require.NoError(t, err) - _, err = pool.Add(peerA, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{1}}) + + _, err = pool.Add(peerAID, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{1}}) require.NoError(t, err) // GetPeer currently picks a random peer, so lets run it until we've seen both. 
@@ -101,17 +98,17 @@ func TestSnapshotPool_GetPeer(t *testing.T) { seenB := false for !seenA || !seenB { peer := pool.GetPeer(s) - switch peer.ID() { - case p2p.ID("a"): + if peer.Equal(peerAID) { seenA = true - case p2p.ID("b"): + } + if peer.Equal(peerBID) { seenB = true } } // GetPeer should return nil for an unknown snapshot peer := pool.GetPeer(&snapshot{Height: 9, Format: 9}) - assert.Nil(t, peer) + require.Nil(t, peer) } func TestSnapshotPool_GetPeers(t *testing.T) { @@ -120,22 +117,23 @@ func TestSnapshotPool_GetPeers(t *testing.T) { pool := newSnapshotPool(stateProvider) s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} - peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) - peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) - _, err := pool.Add(peerA, s) + peerAID := p2p.PeerID{0xAA} + peerBID := p2p.PeerID{0xBB} + + _, err := pool.Add(peerAID, s) require.NoError(t, err) - _, err = pool.Add(peerB, s) + + _, err = pool.Add(peerBID, s) require.NoError(t, err) - _, err = pool.Add(peerA, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}) + + _, err = pool.Add(peerAID, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}) require.NoError(t, err) peers := pool.GetPeers(s) - assert.Len(t, peers, 2) - assert.EqualValues(t, "a", peers[0].ID()) - assert.EqualValues(t, "b", peers[1].ID()) + require.Len(t, peers, 2) + require.Equal(t, peerAID, peers[0]) + require.EqualValues(t, peerBID, peers[1]) } func TestSnapshotPool_Ranked_Best(t *testing.T) { @@ -150,28 +148,30 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) { snapshot *snapshot peers []string }{ - {&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []string{"a", "b", "c"}}, - {&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"a"}}, - {&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []string{"a", "b"}}, - {&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"a", "b"}}, - 
{&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []string{"a", "b", "c"}}, + {&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []string{"AA", "BB", "CC"}}, + {&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"AA"}}, + {&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []string{"AA", "BB"}}, + {&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"AA", "BB"}}, + {&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []string{"AA", "BB", "CC"}}, } // Add snapshots in reverse order, to make sure the pool enforces some order. for i := len(expectSnapshots) - 1; i >= 0; i-- { - for _, peerID := range expectSnapshots[i].peers { - peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID(peerID)) - _, err := pool.Add(peer, expectSnapshots[i].snapshot) + for _, peerIDStr := range expectSnapshots[i].peers { + peerID, err := p2p.PeerIDFromString(peerIDStr) + require.NoError(t, err) + + _, err = pool.Add(peerID, expectSnapshots[i].snapshot) require.NoError(t, err) } } // Ranked should return the snapshots in the same order ranked := pool.Ranked() - assert.Len(t, ranked, len(expectSnapshots)) + require.Len(t, ranked, len(expectSnapshots)) + for i := range ranked { - assert.Equal(t, expectSnapshots[i].snapshot, ranked[i]) + require.Equal(t, expectSnapshots[i].snapshot, ranked[i]) } // Check that best snapshots are returned in expected order @@ -180,15 +180,16 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) { require.Equal(t, snapshot, pool.Best()) pool.Reject(snapshot) } - assert.Nil(t, pool.Best()) + + require.Nil(t, pool.Best()) } func TestSnapshotPool_Reject(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) + + peerID := p2p.PeerID{0xAA} snapshots := []*snapshot{ {Height: 2, Format: 
2, Chunks: 1, Hash: []byte{1, 2}}, @@ -197,28 +198,28 @@ func TestSnapshotPool_Reject(t *testing.T) { {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, } for _, s := range snapshots { - _, err := pool.Add(peer, s) + _, err := pool.Add(peerID, s) require.NoError(t, err) } pool.Reject(snapshots[0]) - assert.Equal(t, snapshots[1:], pool.Ranked()) + require.Equal(t, snapshots[1:], pool.Ranked()) - added, err := pool.Add(peer, snapshots[0]) + added, err := pool.Add(peerID, snapshots[0]) require.NoError(t, err) - assert.False(t, added) + require.False(t, added) - added, err = pool.Add(peer, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + added, err = pool.Add(peerID, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) require.NoError(t, err) - assert.True(t, added) + require.True(t, added) } func TestSnapshotPool_RejectFormat(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID("id")) + + peerID := p2p.PeerID{0xAA} snapshots := []*snapshot{ {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, @@ -227,21 +228,21 @@ func TestSnapshotPool_RejectFormat(t *testing.T) { {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, } for _, s := range snapshots { - _, err := pool.Add(peer, s) + _, err := pool.Add(peerID, s) require.NoError(t, err) } pool.RejectFormat(1) - assert.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) + require.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) - added, err := pool.Add(peer, &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{1}}) + added, err := pool.Add(peerID, &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{1}}) require.NoError(t, err) - assert.False(t, added) - assert.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) + require.False(t, added) + require.Equal(t, 
[]*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) - added, err = pool.Add(peer, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + added, err = pool.Add(peerID, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) require.NoError(t, err) - assert.True(t, added) + require.True(t, added) } func TestSnapshotPool_RejectPeer(t *testing.T) { @@ -249,41 +250,41 @@ func TestSnapshotPool_RejectPeer(t *testing.T) { stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) - peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) + peerAID := p2p.PeerID{0xAA} + peerBID := p2p.PeerID{0xBB} s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} s3 := &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{2}} - _, err := pool.Add(peerA, s1) + _, err := pool.Add(peerAID, s1) require.NoError(t, err) - _, err = pool.Add(peerA, s2) + + _, err = pool.Add(peerAID, s2) require.NoError(t, err) - _, err = pool.Add(peerB, s2) + _, err = pool.Add(peerBID, s2) require.NoError(t, err) - _, err = pool.Add(peerB, s3) + + _, err = pool.Add(peerBID, s3) require.NoError(t, err) - pool.RejectPeer(peerA.ID()) + pool.RejectPeer(peerAID) - assert.Empty(t, pool.GetPeers(s1)) + require.Empty(t, pool.GetPeers(s1)) peers2 := pool.GetPeers(s2) - assert.Len(t, peers2, 1) - assert.EqualValues(t, "b", peers2[0].ID()) + require.Len(t, peers2, 1) + require.Equal(t, peerBID, peers2[0]) peers3 := pool.GetPeers(s2) - assert.Len(t, peers3, 1) - assert.EqualValues(t, "b", peers3[0].ID()) + require.Len(t, peers3, 1) + require.Equal(t, peerBID, peers3[0]) // it should no longer be possible to add the peer back - _, err = pool.Add(peerA, s1) + _, err = pool.Add(peerAID, s1) require.NoError(t, err) - assert.Empty(t, pool.GetPeers(s1)) + require.Empty(t, pool.GetPeers(s1)) } 
func TestSnapshotPool_RemovePeer(t *testing.T) { @@ -291,35 +292,36 @@ func TestSnapshotPool_RemovePeer(t *testing.T) { stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) - peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) + peerAID := p2p.PeerID{0xAA} + peerBID := p2p.PeerID{0xBB} s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} - _, err := pool.Add(peerA, s1) + _, err := pool.Add(peerAID, s1) require.NoError(t, err) - _, err = pool.Add(peerA, s2) + + _, err = pool.Add(peerAID, s2) require.NoError(t, err) - _, err = pool.Add(peerB, s1) + + _, err = pool.Add(peerBID, s1) require.NoError(t, err) - pool.RemovePeer(peerA.ID()) + pool.RemovePeer(peerAID) peers1 := pool.GetPeers(s1) - assert.Len(t, peers1, 1) - assert.EqualValues(t, "b", peers1[0].ID()) + require.Len(t, peers1, 1) + require.Equal(t, peerBID, peers1[0]) peers2 := pool.GetPeers(s2) - assert.Empty(t, peers2) + require.Empty(t, peers2) // it should still be possible to add the peer back - _, err = pool.Add(peerA, s1) + _, err = pool.Add(peerAID, s1) require.NoError(t, err) + peers1 = pool.GetPeers(s1) - assert.Len(t, peers1, 2) - assert.EqualValues(t, "a", peers1[0].ID()) - assert.EqualValues(t, "b", peers1[1].ID()) + require.Len(t, peers1, 2) + require.Equal(t, peerAID, peers1[0]) + require.Equal(t, peerBID, peers1[1]) } diff --git a/statesync/syncer.go b/statesync/syncer.go index 9b0dff77c8..f4bde5d7cb 100644 --- a/statesync/syncer.go +++ b/statesync/syncer.go @@ -54,6 +54,8 @@ type syncer struct { conn proxy.AppConnSnapshot connQuery proxy.AppConnQuery snapshots *snapshotPool + snapshotCh chan<- p2p.Envelope + chunkCh chan<- p2p.Envelope tempDir string mtx tmsync.RWMutex @@ -61,14 +63,22 @@ type syncer struct { } // newSyncer creates a new syncer. 
-func newSyncer(logger log.Logger, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, - stateProvider StateProvider, tempDir string) *syncer { +func newSyncer( + logger log.Logger, + conn proxy.AppConnSnapshot, + connQuery proxy.AppConnQuery, + stateProvider StateProvider, + snapshotCh, chunkCh chan<- p2p.Envelope, + tempDir string, +) *syncer { return &syncer{ logger: logger, stateProvider: stateProvider, conn: conn, connQuery: connQuery, snapshots: newSnapshotPool(stateProvider), + snapshotCh: snapshotCh, + chunkCh: chunkCh, tempDir: tempDir, } } @@ -97,7 +107,7 @@ func (s *syncer) AddChunk(chunk *chunk) (bool, error) { // AddSnapshot adds a snapshot to the snapshot pool. It returns true if a new, previously unseen // snapshot was accepted and added. -func (s *syncer) AddSnapshot(peer p2p.Peer, snapshot *snapshot) (bool, error) { +func (s *syncer) AddSnapshot(peer p2p.PeerID, snapshot *snapshot) (bool, error) { added, err := s.snapshots.Add(peer, snapshot) if err != nil { return false, err @@ -109,17 +119,20 @@ func (s *syncer) AddSnapshot(peer p2p.Peer, snapshot *snapshot) (bool, error) { return added, nil } -// AddPeer adds a peer to the pool. For now we just keep it simple and send a single request -// to discover snapshots, later we may want to do retries and stuff. -func (s *syncer) AddPeer(peer p2p.Peer) { - s.logger.Debug("Requesting snapshots from peer", "peer", peer.ID()) - peer.Send(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})) +// AddPeer adds a peer to the pool. For now we just keep it simple and send a +// single request to discover snapshots, later we may want to do retries and stuff. +func (s *syncer) AddPeer(peer p2p.PeerID) { + s.logger.Debug("Requesting snapshots from peer", "peer", peer.String()) + s.snapshotCh <- p2p.Envelope{ + To: peer, + Message: &ssproto.SnapshotsRequest{}, + } } // RemovePeer removes a peer from the pool. 
-func (s *syncer) RemovePeer(peer p2p.Peer) { - s.logger.Debug("Removing peer from sync", "peer", peer.ID()) - s.snapshots.RemovePeer(peer.ID()) +func (s *syncer) RemovePeer(peer p2p.PeerID) { + s.logger.Debug("Removing peer from sync", "peer", peer.String()) + s.snapshots.RemovePeer(peer) } // SyncAny tries to sync any of the snapshots in the snapshot pool, waiting to discover further @@ -192,8 +205,8 @@ func (s *syncer) SyncAny(discoveryTime time.Duration) (sm.State, *types.Commit, s.logger.Info("Snapshot senders rejected", "height", snapshot.Height, "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) for _, peer := range s.snapshots.GetPeers(snapshot) { - s.snapshots.RejectPeer(peer.ID()) - s.logger.Info("Snapshot sender rejected", "peer", peer.ID()) + s.snapshots.RejectPeer(peer) + s.logger.Info("Snapshot sender rejected", "peer", peer.String()) } default: @@ -277,7 +290,7 @@ func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types. func (s *syncer) offerSnapshot(snapshot *snapshot) error { s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) - resp, err := s.conn.OfferSnapshotSync(abci.RequestOfferSnapshot{ + resp, err := s.conn.OfferSnapshotSync(context.Background(), abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: snapshot.Height, Format: snapshot.Format, @@ -319,10 +332,10 @@ func (s *syncer) applyChunks(chunks *chunkQueue) error { return fmt.Errorf("failed to fetch chunk: %w", err) } - resp, err := s.conn.ApplySnapshotChunkSync(abci.RequestApplySnapshotChunk{ + resp, err := s.conn.ApplySnapshotChunkSync(context.Background(), abci.RequestApplySnapshotChunk{ Index: chunk.Index, Chunk: chunk.Chunk, - Sender: string(chunk.Sender), + Sender: chunk.Sender.String(), }) if err != nil { return fmt.Errorf("failed to apply chunk %v: %w", chunk.Index, err) @@ -341,9 +354,14 @@ func (s *syncer) applyChunks(chunks *chunkQueue) 
error { // Reject any senders as requested by the app for _, sender := range resp.RejectSenders { if sender != "" { - s.snapshots.RejectPeer(p2p.ID(sender)) - err := chunks.DiscardSender(p2p.ID(sender)) + peerID, err := p2p.PeerIDFromString(sender) if err != nil { + return err + } + + s.snapshots.RejectPeer(peerID) + + if err := chunks.DiscardSender(peerID); err != nil { return fmt.Errorf("failed to reject sender: %w", err) } } @@ -410,34 +428,49 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { "format", snapshot.Format, "hash", snapshot.Hash) return } - s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height, - "format", snapshot.Format, "chunk", chunk, "peer", peer.ID()) - peer.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkRequest{ - Height: snapshot.Height, - Format: snapshot.Format, - Index: chunk, - })) + + s.logger.Debug( + "Requesting snapshot chunk", + "height", snapshot.Height, + "format", snapshot.Format, + "chunk", chunk, + "peer", peer.String(), + ) + + s.chunkCh <- p2p.Envelope{ + To: peer, + Message: &ssproto.ChunkRequest{ + Height: snapshot.Height, + Format: snapshot.Format, + Index: chunk, + }, + } } // verifyApp verifies the sync, checking the app hash and last block height. It returns the // app version, which should be returned as part of the initial state. 
func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { - resp, err := s.connQuery.InfoSync(proxy.RequestInfo) + resp, err := s.connQuery.InfoSync(context.Background(), proxy.RequestInfo) if err != nil { return 0, fmt.Errorf("failed to query ABCI app for appHash: %w", err) } + if !bytes.Equal(snapshot.trustedAppHash, resp.LastBlockAppHash) { s.logger.Error("appHash verification failed", "expected", fmt.Sprintf("%X", snapshot.trustedAppHash), "actual", fmt.Sprintf("%X", resp.LastBlockAppHash)) return 0, errVerifyFailed } + if uint64(resp.LastBlockHeight) != snapshot.Height { - s.logger.Error("ABCI app reported unexpected last block height", - "expected", snapshot.Height, "actual", resp.LastBlockHeight) + s.logger.Error( + "ABCI app reported unexpected last block height", + "expected", snapshot.Height, + "actual", resp.LastBlockHeight, + ) return 0, errVerifyFailed } - s.logger.Info("Verified ABCI app", "height", snapshot.Height, - "appHash", fmt.Sprintf("%X", snapshot.trustedAppHash)) + + s.logger.Info("Verified ABCI app", "height", snapshot.Height, "appHash", fmt.Sprintf("%X", snapshot.trustedAppHash)) return resp.AppVersion, nil } diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go index c7c035f364..17e07f4bb8 100644 --- a/statesync/syncer_test.go +++ b/statesync/syncer_test.go @@ -1,19 +1,17 @@ package statesync import ( + "context" "errors" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" abci "github.com/lazyledger/lazyledger-core/abci/types" - "github.com/lazyledger/lazyledger-core/libs/log" tmsync "github.com/lazyledger/lazyledger-core/libs/sync" "github.com/lazyledger/lazyledger-core/p2p" - p2pmocks "github.com/lazyledger/lazyledger-core/p2p/mocks" tmstate "github.com/lazyledger/lazyledger-core/proto/tendermint/state" ssproto 
"github.com/lazyledger/lazyledger-core/proto/tendermint/statesync" tmversion "github.com/lazyledger/lazyledger-core/proto/tendermint/version" @@ -25,22 +23,7 @@ import ( "github.com/lazyledger/lazyledger-core/version" ) -// Sets up a basic syncer that can be used to test OfferSnapshot requests -func setupOfferSyncer(t *testing.T) (*syncer, *proxymocks.AppConnSnapshot) { - connQuery := &proxymocks.AppConnQuery{} - connSnapshot := &proxymocks.AppConnSnapshot{} - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") - return syncer, connSnapshot -} - -// Sets up a simple peer mock with an ID -func simplePeer(id string) *p2pmocks.Peer { - peer := &p2pmocks.Peer{} - peer.On("ID").Return(p2p.ID(id)) - return peer -} +var ctx = context.Background() func TestSyncer_SyncAny(t *testing.T) { state := sm.State{ @@ -50,7 +33,6 @@ func TestSyncer_SyncAny(t *testing.T) { Block: version.BlockProtocol, App: 0, }, - Software: version.TMCoreSemVer, }, @@ -84,42 +66,43 @@ func TestSyncer_SyncAny(t *testing.T) { connSnapshot := &proxymocks.AppConnSnapshot{} connQuery := &proxymocks.AppConnQuery{} - syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + peerAID := p2p.PeerID{0xAA} + peerBID := p2p.PeerID{0xBB} + + rts := setup(t, connSnapshot, connQuery, stateProvider, 3) // Adding a chunk should error when no sync is in progress - _, err := syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) + _, err := rts.syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) require.Error(t, err) // Adding a couple of peers should trigger snapshot discovery messages - peerA := &p2pmocks.Peer{} - peerA.On("ID").Return(p2p.ID("a")) - peerA.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true) - syncer.AddPeer(peerA) 
- peerA.AssertExpectations(t) - - peerB := &p2pmocks.Peer{} - peerB.On("ID").Return(p2p.ID("b")) - peerB.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true) - syncer.AddPeer(peerB) - peerB.AssertExpectations(t) + rts.syncer.AddPeer(peerAID) + e := <-rts.snapshotOutCh + require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) + require.Equal(t, peerAID, e.To) + + rts.syncer.AddPeer(peerBID) + e = <-rts.snapshotOutCh + require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) + require.Equal(t, peerBID, e.To) // Both peers report back with snapshots. One of them also returns a snapshot we don't want, in // format 2, which will be rejected by the ABCI application. - new, err := syncer.AddSnapshot(peerA, s) + new, err := rts.syncer.AddSnapshot(peerAID, s) require.NoError(t, err) - assert.True(t, new) + require.True(t, new) - new, err = syncer.AddSnapshot(peerB, s) + new, err = rts.syncer.AddSnapshot(peerBID, s) require.NoError(t, err) - assert.False(t, new) + require.False(t, new) - new, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) + new, err = rts.syncer.AddSnapshot(peerBID, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) require.NoError(t, err) - assert.True(t, new) + require.True(t, new) // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. 
- connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: 2, Format: 2, @@ -128,7 +111,7 @@ func TestSyncer_SyncAny(t *testing.T) { }, AppHash: []byte("app_hash_2"), }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: s.Height, Format: s.Format, @@ -141,29 +124,30 @@ func TestSyncer_SyncAny(t *testing.T) { chunkRequests := make(map[uint32]int) chunkRequestsMtx := tmsync.Mutex{} - onChunkRequest := func(args mock.Arguments) { - pb, err := decodeMsg(args[1].([]byte)) - require.NoError(t, err) - msg := pb.(*ssproto.ChunkRequest) - require.EqualValues(t, 1, msg.Height) - require.EqualValues(t, 1, msg.Format) - require.LessOrEqual(t, msg.Index, uint32(len(chunks))) - - added, err := syncer.AddChunk(chunks[msg.Index]) - require.NoError(t, err) - assert.True(t, added) - - chunkRequestsMtx.Lock() - chunkRequests[msg.Index]++ - chunkRequestsMtx.Unlock() - } - peerA.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) - peerB.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) + + go func() { + for e := range rts.chunkOutCh { + msg, ok := e.Message.(*ssproto.ChunkRequest) + require.True(t, ok) + + require.EqualValues(t, 1, msg.Height) + require.EqualValues(t, 1, msg.Format) + require.LessOrEqual(t, msg.Index, uint32(len(chunks))) + + added, err := rts.syncer.AddChunk(chunks[msg.Index]) + require.NoError(t, err) + require.True(t, added) + + chunkRequestsMtx.Lock() + chunkRequests[msg.Index]++ + chunkRequestsMtx.Unlock() + } + }() // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart 
restoration from // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( &abci.ResponseApplySnapshotChunk{ @@ -171,128 +155,155 @@ func TestSyncer_SyncAny(t *testing.T) { RefetchChunks: []uint32{1}, }, nil) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{1, 1, 0}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1, 1, 1}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connQuery.On("InfoSync", proxy.RequestInfo).Return(&abci.ResponseInfo{ + connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: 9, LastBlockHeight: 1, LastBlockAppHash: []byte("app_hash"), }, nil) - newState, lastCommit, err := syncer.SyncAny(0) + newState, lastCommit, err := rts.syncer.SyncAny(0) require.NoError(t, err) time.Sleep(50 * time.Millisecond) // wait for peers to receive requests chunkRequestsMtx.Lock() - assert.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests) + require.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests) 
chunkRequestsMtx.Unlock() // The syncer should have updated the state app version from the ABCI info response. expectState := state expectState.Version.Consensus.App = 9 - assert.Equal(t, expectState, newState) - assert.Equal(t, commit, lastCommit) + require.Equal(t, expectState, newState) + require.Equal(t, commit, lastCommit) connSnapshot.AssertExpectations(t) connQuery.AssertExpectations(t) - peerA.AssertExpectations(t) - peerB.AssertExpectations(t) } func TestSyncer_SyncAny_noSnapshots(t *testing.T) { - syncer, _ := setupOfferSyncer(t) - _, _, err := syncer.SyncAny(0) - assert.Equal(t, errNoSnapshots, err) + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + + rts := setup(t, nil, nil, stateProvider, 2) + + _, _, err := rts.syncer.SyncAny(0) + require.Equal(t, errNoSnapshots, err) } func TestSyncer_SyncAny_abort(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + + rts := setup(t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - _, err := syncer.AddSnapshot(simplePeer("id"), s) + peerID := p2p.PeerID{0xAA} + + _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = syncer.SyncAny(0) - assert.Equal(t, errAbort, err) - connSnapshot.AssertExpectations(t) + _, _, err = rts.syncer.SyncAny(0) + require.Equal(t, errAbort, err) + rts.conn.AssertExpectations(t) } func TestSyncer_SyncAny_reject(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + stateProvider := &mocks.StateProvider{} 
+ stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + + rts := setup(t, nil, nil, stateProvider, 2) // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - _, err := syncer.AddSnapshot(simplePeer("id"), s22) + + peerID := p2p.PeerID{0xAA} + + _, err := rts.syncer.AddSnapshot(peerID, s22) require.NoError(t, err) - _, err = syncer.AddSnapshot(simplePeer("id"), s12) + + _, err = rts.syncer.AddSnapshot(peerID, s12) require.NoError(t, err) - _, err = syncer.AddSnapshot(simplePeer("id"), s11) + + _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s12), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = syncer.SyncAny(0) - assert.Equal(t, errNoSnapshots, err) - connSnapshot.AssertExpectations(t) + _, _, err = rts.syncer.SyncAny(0) + require.Equal(t, errNoSnapshots, err) + rts.conn.AssertExpectations(t) } func TestSyncer_SyncAny_reject_format(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + 
stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + + rts := setup(t, nil, nil, stateProvider, 2) // s22 is tried first, which reject s22 and s12, then s11 will abort. s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - _, err := syncer.AddSnapshot(simplePeer("id"), s22) + + peerID := p2p.PeerID{0xAA} + + _, err := rts.syncer.AddSnapshot(peerID, s22) require.NoError(t, err) - _, err = syncer.AddSnapshot(simplePeer("id"), s12) + + _, err = rts.syncer.AddSnapshot(peerID, s12) require.NoError(t, err) - _, err = syncer.AddSnapshot(simplePeer("id"), s11) + + _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = syncer.SyncAny(0) - assert.Equal(t, errAbort, err) - connSnapshot.AssertExpectations(t) + _, _, err = rts.syncer.SyncAny(0) + require.Equal(t, errAbort, err) + rts.conn.AssertExpectations(t) } func TestSyncer_SyncAny_reject_sender(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + + rts := setup(t, nil, nil, stateProvider, 2) - peerA := simplePeer("a") - peerB := simplePeer("b") - peerC := 
simplePeer("c") + peerAID := p2p.PeerID{0xAA} + peerBID := p2p.PeerID{0xBB} + peerCID := p2p.PeerID{0xCC} // sbc will be offered first, which will be rejected with reject_sender, causing all snapshots // submitted by both b and c (i.e. sb, sc, sbc) to be rejected. Finally, sa will reject and @@ -301,44 +312,56 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { sb := &snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} sc := &snapshot{Height: 3, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} sbc := &snapshot{Height: 4, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - _, err := syncer.AddSnapshot(peerA, sa) + + _, err := rts.syncer.AddSnapshot(peerAID, sa) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerB, sb) + + _, err = rts.syncer.AddSnapshot(peerBID, sb) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerC, sc) + + _, err = rts.syncer.AddSnapshot(peerCID, sc) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerB, sbc) + + _, err = rts.syncer.AddSnapshot(peerBID, sbc) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerC, sbc) + + _, err = rts.syncer.AddSnapshot(peerCID, sbc) require.NoError(t, err) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(sa), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = syncer.SyncAny(0) - assert.Equal(t, errNoSnapshots, err) - connSnapshot.AssertExpectations(t) + _, _, err = rts.syncer.SyncAny(0) + require.Equal(t, errNoSnapshots, err) + rts.conn.AssertExpectations(t) } func TestSyncer_SyncAny_abciError(t *testing.T) { - 
syncer, connSnapshot := setupOfferSyncer(t) + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + + rts := setup(t, nil, nil, stateProvider, 2) errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - _, err := syncer.AddSnapshot(simplePeer("id"), s) + + peerID := p2p.PeerID{0xAA} + + _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(nil, errBoom) - _, _, err = syncer.SyncAny(0) - assert.True(t, errors.Is(err, errBoom)) - connSnapshot.AssertExpectations(t) + _, _, err = rts.syncer.SyncAny(0) + require.True(t, errors.Is(err, errBoom)) + rts.conn.AssertExpectations(t) } func TestSyncer_offerSnapshot(t *testing.T) { @@ -362,13 +385,18 @@ func TestSyncer_offerSnapshot(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + + rts := setup(t, nil, nil, stateProvider, 2) + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) - err := syncer.offerSnapshot(s) + + err := rts.syncer.offerSnapshot(s) if tc.expectErr == unknownErr { require.Error(t, err) } else { @@ -376,7 +404,7 @@ func TestSyncer_offerSnapshot(t *testing.T) { if unwrapped != nil { err = unwrapped } - assert.Equal(t, tc.expectErr, err) + require.Equal(t, 
tc.expectErr, err) } }) } @@ -403,11 +431,10 @@ func TestSyncer_applyChunks_Results(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - connQuery := &proxymocks.AppConnQuery{} - connSnapshot := &proxymocks.AppConnSnapshot{} stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + rts := setup(t, nil, nil, stateProvider, 2) body := []byte{1, 2, 3} chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") @@ -415,17 +442,17 @@ func TestSyncer_applyChunks_Results(t *testing.T) { _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } - err = syncer.applyChunks(chunks) + err = rts.syncer.applyChunks(chunks) if tc.expectErr == unknownErr { require.Error(t, err) } else { @@ -433,9 +460,10 @@ func TestSyncer_applyChunks_Results(t *testing.T) { if unwrapped != nil { err = unwrapped } - assert.Equal(t, tc.expectErr, err) + require.Equal(t, tc.expectErr, err) } - connSnapshot.AssertExpectations(t) + + rts.conn.AssertExpectations(t) }) } } @@ -454,11 +482,10 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - connQuery := &proxymocks.AppConnQuery{} 
- connSnapshot := &proxymocks.AppConnSnapshot{} stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + rts := setup(t, nil, nil, stateProvider, 2) chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") require.NoError(t, err) @@ -473,13 +500,13 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for 1 to be refetched - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -490,15 +517,15 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { // check the queue contents, and finally close the queue to end the goroutine. // We don't really care about the result of applyChunks, since it has separate test. 
go func() { - syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) - assert.True(t, chunks.Has(0)) - assert.False(t, chunks.Has(1)) - assert.True(t, chunks.Has(2)) - err = chunks.Close() - require.NoError(t, err) + require.True(t, chunks.Has(0)) + require.False(t, chunks.Has(1)) + require.True(t, chunks.Has(2)) + + require.NoError(t, chunks.Close()) }) } } @@ -517,63 +544,71 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - connQuery := &proxymocks.AppConnQuery{} - connSnapshot := &proxymocks.AppConnSnapshot{} stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + rts := setup(t, nil, nil, stateProvider, 2) // Set up three peers across two snapshots, and ask for one of them to be banned. // It should be banned from all snapshots. 
- peerA := simplePeer("a") - peerB := simplePeer("b") - peerC := simplePeer("c") + peerAID := p2p.PeerID{0xAA} + peerBID := p2p.PeerID{0xBB} + peerCID := p2p.PeerID{0xCC} s1 := &snapshot{Height: 1, Format: 1, Chunks: 3} s2 := &snapshot{Height: 2, Format: 1, Chunks: 3} - _, err := syncer.AddSnapshot(peerA, s1) + + _, err := rts.syncer.AddSnapshot(peerAID, s1) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerA, s2) + + _, err = rts.syncer.AddSnapshot(peerAID, s2) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerB, s1) + + _, err = rts.syncer.AddSnapshot(peerBID, s1) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerB, s2) + + _, err = rts.syncer.AddSnapshot(peerBID, s2) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerC, s1) + + _, err = rts.syncer.AddSnapshot(peerCID, s1) require.NoError(t, err) - _, err = syncer.AddSnapshot(peerC, s2) + + _, err = rts.syncer.AddSnapshot(peerCID, s2) require.NoError(t, err) chunks, err := newChunkQueue(s1, "") require.NoError(t, err) - added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerA.ID()}) + + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerAID}) require.True(t, added) require.NoError(t, err) - added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}, Sender: peerB.ID()}) + + added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}, Sender: peerBID}) require.True(t, added) require.NoError(t, err) - added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}, Sender: peerC.ID()}) + + added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}, Sender: peerCID}) require.True(t, added) require.NoError(t, err) // The first two chunks are accepted, before the last one asks for b sender to be rejected - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ - Index: 0, Chunk: []byte{0}, Sender: "a", + 
rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: []byte{0}, Sender: "aa", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ - Index: 1, Chunk: []byte{1}, Sender: "b", + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + Index: 1, Chunk: []byte{1}, Sender: "bb", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ - Index: 2, Chunk: []byte{2}, Sender: "c", + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, - RejectSenders: []string{string(peerB.ID())}, + RejectSenders: []string{peerBID.String()}, }, nil) // On retry, the last chunk will be tried again, so we just accept it then. if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ - Index: 2, Chunk: []byte{2}, Sender: "c", + rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } @@ -581,23 +616,22 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // However, it will block on e.g. retry result, so we spawn a goroutine that will // be shut down when the chunk queue closes. 
go func() { - syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) - s1peers := syncer.snapshots.GetPeers(s1) - assert.Len(t, s1peers, 2) - assert.EqualValues(t, "a", s1peers[0].ID()) - assert.EqualValues(t, "c", s1peers[1].ID()) + s1peers := rts.syncer.snapshots.GetPeers(s1) + require.Len(t, s1peers, 2) + require.EqualValues(t, "aa", s1peers[0].String()) + require.EqualValues(t, "cc", s1peers[1].String()) - syncer.snapshots.GetPeers(s1) - assert.Len(t, s1peers, 2) - assert.EqualValues(t, "a", s1peers[0].ID()) - assert.EqualValues(t, "c", s1peers[1].ID()) + rts.syncer.snapshots.GetPeers(s1) + require.Len(t, s1peers, 2) + require.EqualValues(t, "aa", s1peers[0].String()) + require.EqualValues(t, "cc", s1peers[1].String()) - err = chunks.Close() - require.NoError(t, err) + require.NoError(t, chunks.Close()) }) } } @@ -631,20 +665,18 @@ func TestSyncer_verifyApp(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - connQuery := &proxymocks.AppConnQuery{} - connSnapshot := &proxymocks.AppConnSnapshot{} - stateProvider := &mocks.StateProvider{} - syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + rts := setup(t, nil, nil, nil, 2) - connQuery.On("InfoSync", proxy.RequestInfo).Return(tc.response, tc.err) - version, err := syncer.verifyApp(s) + rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(tc.response, tc.err) + version, err := rts.syncer.verifyApp(s) unwrapped := errors.Unwrap(err) if unwrapped != nil { err = unwrapped } - assert.Equal(t, tc.expectErr, err) + + require.Equal(t, tc.expectErr, err) if err == nil { - assert.Equal(t, tc.response.AppVersion, version) + require.Equal(t, tc.response.AppVersion, version) } }) } diff --git a/tests.mk b/test/Makefile similarity index 91% rename from tests.mk rename to test/Makefile index 
e4431d9357..77b29cfa09 100644 --- a/tests.mk +++ b/test/Makefile @@ -52,11 +52,6 @@ test100: @for i in {1..100}; do make test; done .PHONY: test100 -vagrant_test: - vagrant up - vagrant ssh -c 'make test_integrations' -.PHONY: vagrant_test - ### go tests test: @echo "--> Running go test" @@ -70,5 +65,5 @@ test_race: test_deadlock: @echo "--> Running go test --deadlock" - @go test -p 1 -v $(PACKAGES) -tags deadlock + @go test -p 1 -v $(PACKAGES) -tags deadlock .PHONY: test_race diff --git a/test/app/test.sh b/test/app/test.sh index dc60bfc1fb..710aae80b1 100755 --- a/test/app/test.sh +++ b/test/app/test.sh @@ -17,7 +17,7 @@ function kvstore_over_socket(){ echo "Starting kvstore_over_socket" abci-cli kvstore > /dev/null & pid_kvstore=$! - tendermint node > tendermint.log & + tendermint start > tendermint.log & pid_tendermint=$! sleep 5 @@ -32,7 +32,7 @@ function kvstore_over_socket_reorder(){ rm -rf $TMHOME tendermint init echo "Starting kvstore_over_socket_reorder (ie. start tendermint first)" - tendermint node > tendermint.log & + tendermint start > tendermint.log & pid_tendermint=$! sleep 2 abci-cli kvstore > /dev/null & @@ -52,7 +52,7 @@ function counter_over_socket() { echo "Starting counter_over_socket" abci-cli counter --serial > /dev/null & pid_counter=$! - tendermint node > tendermint.log & + tendermint start > tendermint.log & pid_tendermint=$! sleep 5 @@ -68,7 +68,7 @@ function counter_over_grpc() { echo "Starting counter_over_grpc" abci-cli counter --serial --abci grpc > /dev/null & pid_counter=$! - tendermint node --abci grpc > tendermint.log & + tendermint start --abci grpc > tendermint.log & pid_tendermint=$! sleep 5 @@ -86,7 +86,7 @@ function counter_over_grpc_grpc() { pid_counter=$! sleep 1 GRPC_PORT=36656 - tendermint node --abci grpc --rpc.grpc_laddr tcp://localhost:$GRPC_PORT > tendermint.log & + tendermint start --abci grpc --rpc.grpc-laddr tcp://localhost:$GRPC_PORT > tendermint.log & pid_tendermint=$! 
sleep 5 diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index a2d6ea7b0f..3628be9f99 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14 +FROM golang:1.15 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 602de7547d..be6b78366b 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -8,6 +8,14 @@ docker: # ABCI testing). app: go build -o build/app -tags badgerdb,boltdb,cleveldb,rocksdb ./app + +# To be used primarily by the e2e docker instance. If you want to produce this binary +# elsewhere, then run go build in the maverick directory. +maverick: + go build -o build/maverick -tags badgerdb,boltdb,cleveldb,rocksdb ../maverick + +generator: + go build -o build/generator ./generator generator: go build -o build/generator ./generator @@ -15,4 +23,4 @@ generator: runner: go build -o build/runner ./runner -.PHONY: all app docker generator runner +.PHONY: all app docker generator maverick runner diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 8ae3e13487..989730a70c 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -199,13 +199,15 @@ func (app *Application) validatorUpdates(height uint64) (abci.ValidatorUpdates, if len(updates) == 0 { return nil, nil } + valUpdates := abci.ValidatorUpdates{} for keyString, power := range updates { + keyBytes, err := base64.StdEncoding.DecodeString(keyString) if err != nil { return nil, fmt.Errorf("invalid base64 pubkey value %q: %w", keyString, err) } - valUpdates = append(valUpdates, abci.Ed25519ValidatorUpdate(keyBytes, int64(power))) + valUpdates = append(valUpdates, abci.UpdateValidator(keyBytes, int64(power), app.cfg.KeyType)) } return valUpdates, nil } diff --git a/test/e2e/app/config.go b/test/e2e/app/config.go index 20df6ce90f..38c967916d 100644 --- a/test/e2e/app/config.go +++ b/test/e2e/app/config.go @@ -21,6 +21,8 @@ type Config struct { PrivValServer string 
`toml:"privval_server"` PrivValKey string `toml:"privval_key"` PrivValState string `toml:"privval_state"` + Misbehaviors map[string]string `toml:"misbehaviors"` + KeyType string `toml:"key_type"` } // LoadConfig loads the configuration from disk. diff --git a/test/e2e/app/main.go b/test/e2e/app/main.go index d057415330..7dbad5fd84 100644 --- a/test/e2e/app/main.go +++ b/test/e2e/app/main.go @@ -5,10 +5,14 @@ import ( "fmt" "os" "path/filepath" + "strconv" "time" + "github.com/spf13/viper" + "github.com/lazyledger/lazyledger-core/abci/server" "github.com/lazyledger/lazyledger-core/config" + "github.com/lazyledger/lazyledger-core/crypto/ed25519" tmflags "github.com/lazyledger/lazyledger-core/libs/cli/flags" "github.com/lazyledger/lazyledger-core/libs/log" tmnet "github.com/lazyledger/lazyledger-core/libs/net" @@ -16,7 +20,8 @@ import ( "github.com/lazyledger/lazyledger-core/p2p" "github.com/lazyledger/lazyledger-core/privval" "github.com/lazyledger/lazyledger-core/proxy" - "github.com/spf13/viper" + mcs "github.com/lazyledger/lazyledger-core/test/maverick/consensus" + maverick "github.com/lazyledger/lazyledger-core/test/maverick/node" ) var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) @@ -60,7 +65,11 @@ func run(configFile string) error { case "socket", "grpc": err = startApp(cfg) case "builtin": - err = startNode(cfg) + if len(cfg.Misbehaviors) == 0 { + err = startNode(cfg) + } else { + err = startMaverick(cfg) + } default: err = fmt.Errorf("invalid protocol %q", cfg.Protocol) } @@ -102,51 +111,63 @@ func startNode(cfg *Config) error { return err } - home := os.Getenv("TMHOME") - if home == "" { - return errors.New("TMHOME not set") + tmcfg, nodeLogger, nodeKey, err := setupNode() + if err != nil { + return fmt.Errorf("failed to setup config: %w", err) } - viper.AddConfigPath(filepath.Join(home, "config")) - viper.SetConfigName("config") - err = 
viper.ReadInConfig() + + pval, err := privval.LoadOrGenFilePV(tmcfg.PrivValidatorKeyFile(), tmcfg.PrivValidatorStateFile()) if err != nil { return err } - tmcfg := config.DefaultConfig() - err = viper.Unmarshal(tmcfg) + n, err := node.NewNode(tmcfg, + pval, + *nodeKey, + proxy.NewLocalClientCreator(app), + node.DefaultGenesisDocProviderFunc(tmcfg), + node.DefaultDBProvider, + node.DefaultMetricsProvider(tmcfg.Instrumentation), + nodeLogger, + ) if err != nil { return err } - tmcfg.SetRoot(home) - if err = tmcfg.ValidateBasic(); err != nil { - return fmt.Errorf("error in config file: %v", err) - } - if tmcfg.LogFormat == config.LogFormatJSON { - logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout)) - } - logger, err = tmflags.ParseLogLevel(tmcfg.LogLevel, logger, config.DefaultLogLevel()) + return n.Start() +} + +// startMaverick starts a Maverick node that runs the application directly. It assumes the Tendermint +// configuration is in $TMHOME/config/tendermint.toml. +func startMaverick(cfg *Config) error { + app, err := NewApplication(cfg) if err != nil { return err } - logger = logger.With("module", "main") - nodeKey, err := p2p.LoadOrGenNodeKey(tmcfg.NodeKeyFile()) + tmcfg, logger, nodeKey, err := setupNode() if err != nil { - return fmt.Errorf("failed to load or gen node key %s: %w", tmcfg.NodeKeyFile(), err) + return fmt.Errorf("failed to setup config: %w", err) } - n, err := node.NewNode(tmcfg, - privval.LoadOrGenFilePV(tmcfg.PrivValidatorKeyFile(), tmcfg.PrivValidatorStateFile()), - nodeKey, + misbehaviors := make(map[int64]mcs.Misbehavior, len(cfg.Misbehaviors)) + for heightString, misbehaviorString := range cfg.Misbehaviors { + height, _ := strconv.ParseInt(heightString, 10, 64) + misbehaviors[height] = mcs.MisbehaviorList[misbehaviorString] + } + + n, err := maverick.NewNode(tmcfg, + maverick.LoadOrGenFilePV(tmcfg.PrivValidatorKeyFile(), tmcfg.PrivValidatorStateFile()), + *nodeKey, proxy.NewLocalClientCreator(app), - 
node.DefaultGenesisDocProviderFunc(tmcfg), - node.DefaultDBProvider, - node.DefaultMetricsProvider(tmcfg.Instrumentation), + maverick.DefaultGenesisDocProviderFunc(tmcfg), + maverick.DefaultDBProvider, + maverick.DefaultMetricsProvider(tmcfg.Instrumentation), logger, + misbehaviors, ) if err != nil { return err } + return n.Start() } @@ -158,7 +179,7 @@ func startSigner(cfg *Config) error { var dialFn privval.SocketDialer switch protocol { case "tcp": - dialFn = privval.DialTCPFn(address, 3*time.Second, filePV.Key.PrivKey) + dialFn = privval.DialTCPFn(address, 3*time.Second, ed25519.GenPrivKey()) case "unix": dialFn = privval.DialUnixFn(address) default: @@ -175,3 +196,42 @@ func startSigner(cfg *Config) error { logger.Info(fmt.Sprintf("Remote signer connecting to %v", cfg.PrivValServer)) return nil } + +func setupNode() (*config.Config, log.Logger, *p2p.NodeKey, error) { + var tmcfg *config.Config + + home := os.Getenv("TMHOME") + if home == "" { + return nil, nil, nil, errors.New("TMHOME not set") + } + viper.AddConfigPath(filepath.Join(home, "config")) + viper.SetConfigName("config") + err := viper.ReadInConfig() + if err != nil { + return nil, nil, nil, err + } + tmcfg = config.DefaultConfig() + err = viper.Unmarshal(tmcfg) + if err != nil { + return nil, nil, nil, err + } + tmcfg.SetRoot(home) + if err = tmcfg.ValidateBasic(); err != nil { + return nil, nil, nil, fmt.Errorf("error in config file: %w", err) + } + if tmcfg.LogFormat == config.LogFormatJSON { + logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout)) + } + nodeLogger, err := tmflags.ParseLogLevel(tmcfg.LogLevel, logger, config.DefaultLogLevel()) + if err != nil { + return nil, nil, nil, err + } + nodeLogger = nodeLogger.With("module", "main") + + nodeKey, err := p2p.LoadOrGenNodeKey(tmcfg.NodeKeyFile()) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to load or gen node key %s: %w", tmcfg.NodeKeyFile(), err) + } + + return tmcfg, nodeLogger, &nodeKey, nil +} diff --git 
a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 273bd07c6c..668c7f83ec 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -18,6 +18,7 @@ RUN go mod download COPY . . RUN make build && cp build/tendermint /usr/bin/tendermint COPY test/e2e/docker/entrypoint* /usr/bin/ +RUN cd test/e2e && make maverick && cp build/maverick /usr/bin/maverick RUN cd test/e2e && make app && cp build/app /usr/bin/app # Set up runtime directory. We don't use a separate runtime image since we need @@ -28,5 +29,5 @@ ENV TMHOME=/tendermint EXPOSE 26656 26657 26660 ENTRYPOINT ["/usr/bin/entrypoint"] -CMD ["node"] +CMD ["start"] STOPSIGNAL SIGTERM diff --git a/test/e2e/docker/entrypoint-maverick b/test/e2e/docker/entrypoint-maverick new file mode 100755 index 0000000000..9469e24473 --- /dev/null +++ b/test/e2e/docker/entrypoint-maverick @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Forcibly remove any stray UNIX sockets left behind from previous runs +rm -rf /var/run/privval.sock /var/run/app.sock + +/usr/bin/app /tendermint/config/app.toml & + +sleep 1 + +/usr/bin/maverick "$@" diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 8b102dabda..b8109c6e74 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -4,9 +4,11 @@ import ( "fmt" "math/rand" "sort" + "strconv" "strings" e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" + "github.com/lazyledger/lazyledger-core/types" ) var ( @@ -21,16 +23,14 @@ var ( map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"}, }, "validators": {"genesis", "initchain"}, + "keyType": {types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1}, } // The following specify randomly chosen values for testnet nodes. 
- nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"} - // FIXME disabled grpc due to https://github.com/tendermint/tendermint/issues/5439 - nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin"} // "grpc" - nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp"} - // FIXME disabled v1 due to https://github.com/tendermint/tendermint/issues/5444 - // FIXME disabled v2 due to https://github.com/tendermint/tendermint/issues/5513 - nodeFastSyncs = uniformChoice{"", "v0"} // "v1", "v2" + nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"} + nodeABCIProtocols = uniformChoice{"unix", "tcp", "grpc", "builtin"} + nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp"} + nodeFastSyncs = uniformChoice{"", "v0", "v2"} nodeStateSyncs = uniformChoice{false, true} nodePersistIntervals = uniformChoice{0, 1, 5} nodeSnapshotIntervals = uniformChoice{0, 3} @@ -38,9 +38,15 @@ var ( nodePerturbations = probSetChoice{ "disconnect": 0.1, "pause": 0.1, - // FIXME disabled due to https://github.com/tendermint/tendermint/issues/5422 - // "kill": 0.1, - // "restart": 0.1, + "kill": 0.1, + "restart": 0.1, + } + nodeMisbehaviors = weightedChoice{ + // FIXME evidence disabled due to node panicing when not + // having sufficient block history to process evidence. + // https://github.com/tendermint/tendermint/issues/5617 + // misbehaviorOption{"double-prevote"}: 1, + misbehaviorOption{}: 9, } ) @@ -66,6 +72,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er Validators: &map[string]int64{}, ValidatorUpdates: map[string]map[string]int64{}, Nodes: map[string]*e2e.ManifestNode{}, + KeyType: opt["keyType"].(string), } var numSeeds, numValidators, numFulls int @@ -85,7 +92,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // First we generate seed nodes, starting at the initial height. 
for i := 1; i <= numSeeds; i++ { - manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode(r, e2e.ModeSeed, 0, false) + manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode( + r, e2e.ModeSeed, 0, manifest.InitialHeight, false) } // Next, we generate validators. We make sure a BFT quorum of validators start @@ -100,7 +108,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er nextStartAt += 5 } name := fmt.Sprintf("validator%02d", i) - manifest.Nodes[name] = generateNode(r, e2e.ModeValidator, startAt, i <= 2) + manifest.Nodes[name] = generateNode( + r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) if startAt == 0 { (*manifest.Validators)[name] = int64(30 + r.Intn(71)) @@ -128,7 +137,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er startAt = nextStartAt nextStartAt += 5 } - manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode(r, e2e.ModeFull, startAt, false) + manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode( + r, e2e.ModeFull, startAt, manifest.InitialHeight, false) } // We now set up peer discovery for nodes. Seed nodes are fully meshed with @@ -177,7 +187,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // generating invalid configurations. We do not set Seeds or PersistentPeers // here, since we need to know the overall network topology and startup // sequencing. 
-func generateNode(r *rand.Rand, mode e2e.Mode, startAt int64, forceArchive bool) *e2e.ManifestNode { +func generateNode( + r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, +) *e2e.ManifestNode { node := e2e.ManifestNode{ Mode: string(mode), StartAt: startAt, @@ -199,6 +211,17 @@ func generateNode(r *rand.Rand, mode e2e.Mode, startAt int64, forceArchive bool) node.SnapshotInterval = 3 } + if node.Mode == "validator" { + misbehaveAt := startAt + 5 + int64(r.Intn(10)) + if startAt == 0 { + misbehaveAt += initialHeight - 1 + } + node.Misbehaviors = nodeMisbehaviors.Choose(r).(misbehaviorOption).atHeight(misbehaveAt) + if len(node.Misbehaviors) != 0 { + node.PrivvalProtocol = "file" + } + } + // If a node which does not persist state also does not retain blocks, randomly // choose to either persist state or retain all blocks. if node.PersistInterval != nil && *node.PersistInterval == 0 && node.RetainBlocks > 0 { @@ -226,3 +249,16 @@ func generateNode(r *rand.Rand, mode e2e.Mode, startAt int64, forceArchive bool) func ptrUint64(i uint64) *uint64 { return &i } + +type misbehaviorOption struct { + misbehavior string +} + +func (m misbehaviorOption) atHeight(height int64) map[string]string { + misbehaviorMap := make(map[string]string) + if m.misbehavior == "" { + return misbehaviorMap + } + misbehaviorMap[strconv.Itoa(int(height))] = m.misbehavior + return misbehaviorMap +} diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index ce73ccd979..8dfffd9535 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -9,7 +9,8 @@ import ( "path/filepath" "github.com/spf13/cobra" - "github.com/tendermint/tendermint/libs/log" + + "github.com/lazyledger/lazyledger-core/libs/log" ) const ( diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go index 04d1ac70de..ec59a01b2a 100644 --- a/test/e2e/generator/random.go +++ b/test/e2e/generator/random.go @@ -57,7 +57,7 @@ func (uc 
uniformChoice) Choose(r *rand.Rand) interface{} { } // weightedChoice chooses a single random key from a map of keys and weights. -type weightedChoice map[interface{}]uint // nolint:unused +type weightedChoice map[interface{}]uint func (wc weightedChoice) Choose(r *rand.Rand) interface{} { total := 0 diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 2814a9dd56..520762ce94 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -1,4 +1,4 @@ -# This testnet is (will be) run by CI, and attempts to cover a broad range of +# This testnet is run by CI, and attempts to cover a broad range of # functionality with a single network. initial_height = 1000 @@ -36,6 +36,7 @@ seeds = ["seed01"] seeds = ["seed01"] snapshot_interval = 5 perturb = ["disconnect"] +misbehaviors = { 1018 = "double-prevote" } [node.validator02] seeds = ["seed02"] @@ -43,28 +44,21 @@ database = "boltdb" abci_protocol = "tcp" privval_protocol = "tcp" persist_interval = 0 -# FIXME The WAL gets corrupted when restarted -# https://github.com/lazyledger/lazyledger-core/issues/5422 -#perturb = ["restart"] +perturb = ["restart"] [node.validator03] seeds = ["seed01"] database = "badgerdb" -# FIXME Should use grpc, but it has race conditions -# https://github.com/lazyledger/lazyledger-core/issues/5439 -abci_protocol = "unix" +abci_protocol = "grpc" privval_protocol = "unix" persist_interval = 3 retain_blocks = 3 -# FIXME The WAL gets corrupted when killed -# https://github.com/lazyledger/lazyledger-core/issues/5422 -#perturb = ["kill"] +perturb = ["kill"] [node.validator04] persistent_peers = ["validator01"] database = "rocksdb" abci_protocol = "builtin" -retain_blocks = 1 perturb = ["pause"] [node.validator05] @@ -72,24 +66,17 @@ start_at = 1005 # Becomes part of the validator set at 1010 seeds = ["seed02"] database = "cleveldb" fast_sync = "v0" -# FIXME Should use grpc, but it has race conditions -# 
https://github.com/lazyledger/lazyledger-core/issues/5439 -abci_protocol = "tcp" +abci_protocol = "grpc" privval_protocol = "tcp" -# FIXME The WAL gets corrupted when killed -# https://github.com/lazyledger/lazyledger-core/issues/5422 -#perturb = ["kill", "pause", "disconnect", "restart"] +perturb = ["kill", "pause", "disconnect", "restart"] [node.full01] start_at = 1010 mode = "full" -# FIXME Should use v1, but it won't catch up since some nodes don't have all blocks -# https://github.com/lazyledger/lazyledger-core/issues/5444 fast_sync = "v2" persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"] -# FIXME The WAL gets corrupted when restarted -# https://github.com/lazyledger/lazyledger-core/issues/5422 -#perturb = ["restart"] +retain_blocks = 1 +perturb = ["restart"] [node.full02] start_at = 1015 @@ -97,6 +84,4 @@ mode = "full" fast_sync = "v2" state_sync = true seeds = ["seed01"] -# FIXME The WAL gets corrupted when restarted -# https://github.com/lazyledger/lazyledger-core/issues/5422 -#perturb = ["restart"] +perturb = ["restart"] diff --git a/test/e2e/networks/simple.toml b/test/e2e/networks/simple.toml index 96b81f79fe..37f711a91f 100644 --- a/test/e2e/networks/simple.toml +++ b/test/e2e/networks/simple.toml @@ -2,3 +2,4 @@ [node.validator02] [node.validator03] [node.validator04] + diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index c951d9409a..e94fd07903 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -46,6 +46,10 @@ type Manifest struct { // Nodes specifies the network nodes. At least one node must be given. Nodes map[string]*ManifestNode `toml:"node"` + + // KeyType sets the curve that will be used by validators. + // Options are ed25519 & secp256k1 + KeyType string `toml:"key_type"` } // ManifestNode represents a node in a testnet manifest. 
@@ -83,7 +87,7 @@ type ManifestNode struct { // runner will wait for the network to reach at least this block height. StartAt int64 `toml:"start_at"` - // FastSync specifies the fast sync mode: "" (disable), "v0", "v1", or "v2". + // FastSync specifies the fast sync mode: "" (disable), "v0" or "v2". // Defaults to disabled. FastSync string `toml:"fast_sync"` @@ -115,6 +119,16 @@ type ManifestNode struct { // pause: temporarily pauses (freezes) the node // restart: restarts the node, shutting it down with SIGTERM Perturb []string `toml:"perturb"` + + // Misbehaviors sets how a validator behaves during consensus at a + // certain height. Multiple misbehaviors at different heights can be used + // + // An example of misbehaviors + // { 10 = "double-prevote", 20 = "double-prevote"} + // + // For more information, look at the readme in the maverick folder. + // A list of all behaviors can be found in ../maverick/consensus/behavior.go + Misbehaviors map[string]string `toml:"misbehaviors"` } // Save saves the testnet manifest to a file. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index ac0737c22b..fd3e384303 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -14,7 +14,10 @@ import ( "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/ed25519" + "github.com/lazyledger/lazyledger-core/crypto/secp256k1" rpchttp "github.com/lazyledger/lazyledger-core/rpc/client/http" + mcs "github.com/lazyledger/lazyledger-core/test/maverick/consensus" + "github.com/lazyledger/lazyledger-core/types" ) const ( @@ -56,6 +59,7 @@ type Testnet struct { Validators map[*Node]int64 ValidatorUpdates map[int64]map[*Node]int64 Nodes []*Node + KeyType string } // Node represents a Tendermint node in a testnet. 
@@ -63,7 +67,8 @@ type Node struct { Name string Testnet *Testnet Mode Mode - Key crypto.PrivKey + PrivvalKey crypto.PrivKey + NodeKey crypto.PrivKey IP net.IP ProxyPort uint32 StartAt int64 @@ -78,6 +83,7 @@ type Node struct { Seeds []*Node PersistentPeers []*Node Perturbations []Perturbation + Misbehaviors map[int64]string } // LoadTestnet loads a testnet from a manifest file, using the filename to @@ -116,6 +122,10 @@ func LoadTestnet(file string) (*Testnet, error) { Validators: map[*Node]int64{}, ValidatorUpdates: map[int64]map[*Node]int64{}, Nodes: []*Node{}, + KeyType: "ed25519", + } + if len(manifest.KeyType) != 0 { + testnet.KeyType = manifest.KeyType } if manifest.InitialHeight > 0 { testnet.InitialHeight = manifest.InitialHeight @@ -133,7 +143,8 @@ func LoadTestnet(file string) (*Testnet, error) { node := &Node{ Name: name, Testnet: testnet, - Key: keyGen.Generate(), + PrivvalKey: keyGen.Generate(manifest.KeyType), + NodeKey: keyGen.Generate("ed25519"), IP: ipGen.Next(), ProxyPort: proxyPortGen.Next(), Mode: ModeValidator, @@ -147,6 +158,10 @@ func LoadTestnet(file string) (*Testnet, error) { SnapshotInterval: nodeManifest.SnapshotInterval, RetainBlocks: nodeManifest.RetainBlocks, Perturbations: []Perturbation{}, + Misbehaviors: make(map[int64]string), + } + if node.StartAt == testnet.InitialHeight { + node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this } if nodeManifest.Mode != "" { node.Mode = Mode(nodeManifest.Mode) @@ -166,6 +181,13 @@ func LoadTestnet(file string) (*Testnet, error) { for _, p := range nodeManifest.Perturb { node.Perturbations = append(node.Perturbations, Perturbation(p)) } + for heightString, misbehavior := range nodeManifest.Misbehaviors { + height, err := strconv.ParseInt(heightString, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse height %s to int64: %w", heightString, err) + } + node.Misbehaviors[height] = misbehavior + } testnet.Nodes = append(testnet.Nodes, node) } @@ -250,6 
+272,11 @@ func (t Testnet) Validate() error { if len(t.Nodes) == 0 { return errors.New("network has no nodes") } + switch t.KeyType { + case "", types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1: + default: + return errors.New("unsupported KeyType") + } for _, node := range t.Nodes { if err := node.Validate(t); err != nil { return fmt.Errorf("invalid node %q: %w", node.Name, err) @@ -280,7 +307,7 @@ func (n Node) Validate(testnet Testnet) error { } } switch n.FastSync { - case "", "v0", "v1", "v2": + case "", "v0", "v2": default: return fmt.Errorf("invalid fast sync setting %q", n.FastSync) } @@ -324,6 +351,31 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid perturbation %q", perturbation) } } + + if (n.PrivvalProtocol != "file" || n.Mode != "validator") && len(n.Misbehaviors) != 0 { + return errors.New("must be using \"file\" privval protocol to implement misbehaviors") + } + + for height, misbehavior := range n.Misbehaviors { + if height < n.StartAt { + return fmt.Errorf("misbehavior height %d is below node start height %d", + height, n.StartAt) + } + if height < testnet.InitialHeight { + return fmt.Errorf("misbehavior height %d is below network initial height %d", + height, testnet.InitialHeight) + } + exists := false + for possibleBehaviors := range mcs.MisbehaviorList { + if possibleBehaviors == misbehavior { + exists = true + } + } + if !exists { + return fmt.Errorf("misbehavior %s does not exist", misbehavior) + } + } + return nil } @@ -365,6 +417,29 @@ func (t Testnet) IPv6() bool { return t.IP.IP.To4() == nil } +// HasPerturbations returns whether the network has any perturbations. +func (t Testnet) HasPerturbations() bool { + for _, node := range t.Nodes { + if len(node.Perturbations) > 0 { + return true + } + } + return false +} + +// LastMisbehaviorHeight returns the height of the last misbehavior. 
+func (t Testnet) LastMisbehaviorHeight() int64 { + lastHeight := int64(0) + for _, node := range t.Nodes { + for height := range node.Misbehaviors { + if height > lastHeight { + lastHeight = height + } + } + } + return lastHeight +} + // Address returns a P2P endpoint address for the node. func (n Node) AddressP2P(withID bool) string { ip := n.IP.String() @@ -374,7 +449,7 @@ func (n Node) AddressP2P(withID bool) string { } addr := fmt.Sprintf("%v:26656", ip) if withID { - addr = fmt.Sprintf("%x@%v", n.Key.PubKey().Address().Bytes(), addr) + addr = fmt.Sprintf("%x@%v", n.NodeKey.PubKey().Address().Bytes(), addr) } return addr } @@ -405,15 +480,21 @@ func newKeyGenerator(seed int64) *keyGenerator { } } -func (g *keyGenerator) Generate() crypto.PrivKey { +func (g *keyGenerator) Generate(keyType string) crypto.PrivKey { seed := make([]byte, ed25519.SeedSize) _, err := io.ReadFull(g.random, seed) if err != nil { panic(err) // this shouldn't happen } - - return ed25519.GenPrivKeyFromSecret(seed) + switch keyType { + case "secp256k1": + return secp256k1.GenPrivKeySecp256k1(seed) + case "", "ed25519": + return ed25519.GenPrivKeyFromSecret(seed) + default: + panic("KeyType not supported") // should not make it this far + } } // portGenerator generates local Docker proxy ports for each node. diff --git a/test/e2e/run-multiple.sh b/test/e2e/run-multiple.sh index e9699cf1e7..5d6a20ef95 100755 --- a/test/e2e/run-multiple.sh +++ b/test/e2e/run-multiple.sh @@ -3,33 +3,47 @@ # This is a convenience script that takes a list of testnet manifests # as arguments and runs each one of them sequentially. If a testnet # fails, the container logs are dumped to stdout along with the testnet -# manifest. +# manifest, but the remaining testnets are still run. # # This is mostly used to run generated networks in nightly CI jobs. # -# Don't set -e, since we explicitly check status codes ourselves. 
-set -u +set -euo pipefail if [[ $# == 0 ]]; then echo "Usage: $0 [MANIFEST...]" >&2 exit 1 fi +FAILED=() + for MANIFEST in "$@"; do START=$SECONDS echo "==> Running testnet $MANIFEST..." - ./build/runner -f "$MANIFEST" - if [[ $? -ne 0 ]]; then + if ! ./build/runner -f "$MANIFEST"; then echo "==> Testnet $MANIFEST failed, dumping manifest..." cat "$MANIFEST" echo "==> Dumping container logs for $MANIFEST..." ./build/runner -f "$MANIFEST" logs - exit 1 + + echo "==> Cleaning up failed testnet $MANIFEST..." + ./build/runner -f "$MANIFEST" cleanup + + FAILED+=("$MANIFEST") fi echo "==> Completed testnet $MANIFEST in $(( SECONDS - START ))s" echo "" done + +if [[ ${#FAILED[@]} -ne 0 ]]; then + echo "${#FAILED[@]} testnets failed:" + for MANIFEST in "${FAILED[@]}"; do + echo "- $MANIFEST" + done + exit 1 +else + echo "All testnets successful" +fi diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go index d861078270..fed5715326 100644 --- a/test/e2e/runner/cleanup.go +++ b/test/e2e/runner/cleanup.go @@ -11,44 +11,73 @@ import ( // Cleanup removes the Docker Compose containers and testnet directory. func Cleanup(testnet *e2e.Testnet) error { - if testnet.Dir == "" { - return errors.New("no directory set") + err := cleanupDocker() + if err != nil { + return err } - _, err := os.Stat(testnet.Dir) - if os.IsNotExist(err) { - return nil - } else if err != nil { + err = cleanupDir(testnet.Dir) + if err != nil { return err } + return nil +} +// cleanupDocker removes all E2E resources (with label e2e=True), regardless +// of testnet. +func cleanupDocker() error { logger.Info("Removing Docker containers and networks") - err = execCompose(testnet.Dir, "stop") + + // GNU xargs requires the -r flag to not run when input is empty, macOS + // does this by default. Ugly, but works. 
+ xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)` + + err := exec("bash", "-c", fmt.Sprintf( + "docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR)) + if err != nil { + return err + } + + err = exec("bash", "-c", fmt.Sprintf( + "docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR)) if err != nil { return err } + return nil +} + +// cleanupDir cleans up a testnet directory +func cleanupDir(dir string) error { + if dir == "" { + return errors.New("no directory set") + } + + _, err := os.Stat(dir) + if os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + logger.Info(fmt.Sprintf("Removing testnet directory %q", dir)) + // On Linux, some local files in the volume will be owned by root since Tendermint // runs as root inside the container, so we need to clean them up from within a // container running as root too. - absDir, err := filepath.Abs(testnet.Dir) + absDir, err := filepath.Abs(dir) if err != nil { return err } - err = execDocker("run", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), + err = execDocker("run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir), "tendermint/e2e-node", "sh", "-c", "rm -rf /network/*/") if err != nil { return err } - err = execCompose(testnet.Dir, "down") + err = os.RemoveAll(dir) if err != nil { return err } - logger.Info(fmt.Sprintf("Removing testnet directory %q", testnet.Dir)) - err = os.RemoveAll(testnet.Dir) - if err != nil { - return err - } return nil } diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 516179fa97..7c7146912a 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -5,12 +5,15 @@ import ( "fmt" "os" + "github.com/spf13/cobra" + "github.com/lazyledger/lazyledger-core/libs/log" e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" - "github.com/spf13/cobra" ) -var logger = 
log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +var ( + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +) func main() { NewCLI().Run() @@ -18,8 +21,9 @@ func main() { // CLI is the Cobra-based command-line interface. type CLI struct { - root *cobra.Command - testnet *e2e.Testnet + root *cobra.Command + testnet *e2e.Testnet + preserve bool } // NewCLI sets up the CLI. @@ -65,13 +69,28 @@ func NewCLI() *CLI { if err := Start(cli.testnet); err != nil { return err } - if err := Perturb(cli.testnet); err != nil { - return err + + if lastMisbehavior := cli.testnet.LastMisbehaviorHeight(); lastMisbehavior > 0 { + // wait for misbehaviors before starting perturbations. We do a separate + // wait for another 5 blocks, since the last misbehavior height may be + // in the past depending on network startup ordering. + if err := WaitUntil(cli.testnet, lastMisbehavior); err != nil { + return err + } } if err := Wait(cli.testnet, 5); err != nil { // allow some txs to go through return err } + if cli.testnet.HasPerturbations() { + if err := Perturb(cli.testnet); err != nil { + return err + } + if err := Wait(cli.testnet, 5); err != nil { // allow some txs to go through + return err + } + } + loadCancel() if err := <-chLoadResult; err != nil { return err @@ -82,8 +101,10 @@ func NewCLI() *CLI { if err := Test(cli.testnet); err != nil { return err } - if err := Cleanup(cli.testnet); err != nil { - return err + if !cli.preserve { + if err := Cleanup(cli.testnet); err != nil { + return err + } } return nil }, @@ -92,6 +113,9 @@ func NewCLI() *CLI { cli.root.PersistentFlags().StringP("file", "f", "", "Testnet TOML manifest") _ = cli.root.MarkPersistentFlagRequired("file") + cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, + "Preserves the running of the test net after tests are completed") + cli.root.AddCommand(&cobra.Command{ Use: "setup", Short: "Generates the testnet directory and configuration", diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go 
index cfa43a8cda..1f883dc3f5 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -12,11 +12,13 @@ import ( "path/filepath" "regexp" "sort" + "strconv" "strings" "text/template" "time" "github.com/BurntSushi/toml" + "github.com/lazyledger/lazyledger-core/config" "github.com/lazyledger/lazyledger-core/crypto/ed25519" "github.com/lazyledger/lazyledger-core/p2p" @@ -94,12 +96,12 @@ func Setup(testnet *e2e.Testnet) error { return err } - err = (&p2p.NodeKey{PrivKey: node.Key}).SaveAs(filepath.Join(nodeDir, "config", "node_key.json")) + err = (&p2p.NodeKey{PrivKey: node.NodeKey}).SaveAs(filepath.Join(nodeDir, "config", "node_key.json")) if err != nil { return err } - (privval.NewFilePV(node.Key, + (privval.NewFilePV(node.PrivvalKey, filepath.Join(nodeDir, PrivvalKeyFile), filepath.Join(nodeDir, PrivvalStateFile), )).Save() @@ -118,10 +120,25 @@ func Setup(testnet *e2e.Testnet) error { // MakeDockerCompose generates a Docker Compose config for a testnet. func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { // Must use version 2 Docker Compose format, to support IPv6. 
- tmpl, err := template.New("docker-compose").Parse(`version: '2.4' + tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ + "misbehaviorsToString": func(misbehaviors map[int64]string) string { + str := "" + for height, misbehavior := range misbehaviors { + // after the first behavior set, a comma must be prepended + if str != "" { + str += "," + } + heightString := strconv.Itoa(int(height)) + str += misbehavior + "," + heightString + } + return str + }, + }).Parse(`version: '2.4' networks: {{ .Name }}: + labels: + e2e: true driver: bridge {{- if .IPv6 }} enable_ipv6: true @@ -134,10 +151,15 @@ networks: services: {{- range .Nodes }} {{ .Name }}: + labels: + e2e: true container_name: {{ .Name }} image: tendermint/e2e-node {{- if eq .ABCIProtocol "builtin" }} entrypoint: /usr/bin/entrypoint-builtin +{{- else if .Misbehaviors }} + entrypoint: /usr/bin/entrypoint-maverick + command: ["start", "--misbehaviors", "{{ misbehaviorsToString .Misbehaviors }}"] {{- end }} init: true ports: @@ -169,11 +191,18 @@ func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { ConsensusParams: types.DefaultConsensusParams(), InitialHeight: testnet.InitialHeight, } + switch testnet.KeyType { + case "", types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1: + genesis.ConsensusParams.Validator.PubKeyTypes = + append(genesis.ConsensusParams.Validator.PubKeyTypes, types.ABCIPubKeyTypeSecp256k1) + default: + return genesis, errors.New("unsupported KeyType") + } for validator, power := range testnet.Validators { genesis.Validators = append(genesis.Validators, types.GenesisValidator{ Name: validator.Name, - Address: validator.Key.PubKey().Address(), - PubKey: validator.Key.PubKey(), + Address: validator.PrivvalKey.PubKey().Address(), + PubKey: validator.PrivvalKey.PubKey(), Power: power, }) } @@ -295,6 +324,7 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "persist_interval": node.PersistInterval, "snapshot_interval": node.SnapshotInterval, 
"retain_blocks": node.RetainBlocks, + "key_type": node.PrivvalKey.Type(), } switch node.ABCIProtocol { case e2e.ProtocolUNIX: @@ -325,13 +355,18 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { return nil, fmt.Errorf("unexpected privval protocol setting %q", node.PrivvalProtocol) } } + misbehaviors := make(map[string]string) + for height, misbehavior := range node.Misbehaviors { + misbehaviors[strconv.Itoa(int(height))] = misbehavior + } + cfg["misbehaviors"] = misbehaviors if len(node.Testnet.ValidatorUpdates) > 0 { validatorUpdates := map[string]map[string]int64{} for height, validators := range node.Testnet.ValidatorUpdates { updateVals := map[string]int64{} for node, power := range validators { - updateVals[base64.StdEncoding.EncodeToString(node.Key.PubKey().Bytes())] = power + updateVals[base64.StdEncoding.EncodeToString(node.PrivvalKey.PubKey().Bytes())] = power } validatorUpdates[fmt.Sprintf("%v", height)] = updateVals } @@ -356,7 +391,7 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { if err != nil { return err } - bz = regexp.MustCompile(`(?m)^trust_height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_height = %v`, height))) - bz = regexp.MustCompile(`(?m)^trust_hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_hash = "%X"`, hash))) + bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height))) + bz = regexp.MustCompile(`(?m)^trust-hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash))) return ioutil.WriteFile(cfgPath, bz, 0644) } diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index 6b5cb99eed..c2af2af311 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -10,11 +10,30 @@ import ( func Start(testnet *e2e.Testnet) error { - // Sort nodes by starting order + // Nodes are already sorted by name. Sort them by name then startAt, + // which gives the overall order startAt, mode, name. 
nodeQueue := testnet.Nodes + sort.SliceStable(nodeQueue, func(i, j int) bool { + a, b := nodeQueue[i], nodeQueue[j] + switch { + case a.Mode == b.Mode: + return false + case a.Mode == e2e.ModeSeed: + return true + case a.Mode == e2e.ModeValidator && b.Mode == e2e.ModeFull: + return true + } + return false + }) sort.SliceStable(nodeQueue, func(i, j int) bool { return nodeQueue[i].StartAt < nodeQueue[j].StartAt }) + if len(nodeQueue) == 0 { + return fmt.Errorf("no nodes in testnet") + } + if nodeQueue[0].StartAt > 0 { + return fmt.Errorf("no initial nodes in testnet") + } // Start initial nodes (StartAt: 0) logger.Info("Starting initial network nodes...") diff --git a/test/e2e/runner/wait.go b/test/e2e/runner/wait.go index 3be7ae48fe..e105333fd6 100644 --- a/test/e2e/runner/wait.go +++ b/test/e2e/runner/wait.go @@ -14,9 +14,13 @@ func Wait(testnet *e2e.Testnet, blocks int64) error { if err != nil { return err } - waitFor := block.Height + blocks - logger.Info(fmt.Sprintf("Waiting for all nodes to reach height %v...", waitFor)) - _, err = waitForAllNodes(testnet, waitFor, 20*time.Second) + return WaitUntil(testnet, block.Height+blocks) +} + +// WaitUntil waits until a given height has been reached. 
+func WaitUntil(testnet *e2e.Testnet, height int64) error { + logger.Info(fmt.Sprintf("Waiting for all nodes to reach height %v...", height)) + _, err := waitForAllNodes(testnet, height, 20*time.Second) if err != nil { return err } diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 99fe0bfa4d..7c9831a012 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -6,15 +6,19 @@ import ( "testing" "time" - e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" - "github.com/lazyledger/lazyledger-core/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" + "github.com/lazyledger/lazyledger-core/types" ) // Tests that any initial state given in genesis has made it into the app. func TestApp_InitialState(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } if len(node.Testnet.InitialState) == 0 { return } @@ -34,6 +38,10 @@ func TestApp_InitialState(t *testing.T) { // block and the node sync status. func TestApp_Hash(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + client, err := node.Client() require.NoError(t, err) info, err := client.ABCIInfo(ctx) @@ -55,6 +63,10 @@ func TestApp_Hash(t *testing.T) { // Tests that we can set a value and retrieve it. 
func TestApp_Tx(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + client, err := node.Client() require.NoError(t, err) diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go index 1e29528e3d..56aa2183ec 100644 --- a/test/e2e/tests/block_test.go +++ b/test/e2e/tests/block_test.go @@ -3,15 +3,20 @@ package e2e_test import ( "testing" - e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" ) // Tests that block headers are identical across nodes where present. func TestBlock_Header(t *testing.T) { blocks := fetchBlockChain(t) testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -41,6 +46,10 @@ func TestBlock_Header(t *testing.T) { // Tests that the node contains the expected block range. 
func TestBlock_Range(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index 39a60002e7..43248f81d1 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -7,11 +7,12 @@ import ( "sync" "testing" + "github.com/stretchr/testify/require" + rpchttp "github.com/lazyledger/lazyledger-core/rpc/client/http" rpctypes "github.com/lazyledger/lazyledger-core/rpc/core/types" e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" "github.com/lazyledger/lazyledger-core/types" - "github.com/stretchr/testify/require" ) func init() { diff --git a/test/e2e/tests/evidence_test.go b/test/e2e/tests/evidence_test.go new file mode 100644 index 0000000000..cea4897491 --- /dev/null +++ b/test/e2e/tests/evidence_test.go @@ -0,0 +1,57 @@ +package e2e_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" + "github.com/lazyledger/lazyledger-core/types" +) + +// assert that all nodes that have blocks at the height of a misbehavior has evidence +// for that misbehavior +func TestEvidence_Misbehavior(t *testing.T) { + blocks := fetchBlockChain(t) + testNode(t, func(t *testing.T, node e2e.Node) { + seenEvidence := make(map[int64]struct{}) + for _, block := range blocks { + // Find any evidence blaming this node in this block + var nodeEvidence types.Evidence + for _, evidence := range block.Evidence.Evidence { + switch evidence := evidence.(type) { + case *types.DuplicateVoteEvidence: + if bytes.Equal(evidence.VoteA.ValidatorAddress, node.PrivvalKey.PubKey().Address()) { + nodeEvidence = evidence + } + default: + t.Fatalf("unexpected evidence type %T", evidence) + } + } + if nodeEvidence == nil { + 
continue // no evidence for the node at this height + } + + // Check that evidence was as expected + misbehavior, ok := node.Misbehaviors[nodeEvidence.Height()] + require.True(t, ok, "found unexpected evidence %v in height %v", + nodeEvidence, block.Height) + + switch misbehavior { + case "double-prevote": + require.IsType(t, &types.DuplicateVoteEvidence{}, nodeEvidence, "unexpected evidence type") + default: + t.Fatalf("unknown misbehavior %v", misbehavior) + } + + seenEvidence[nodeEvidence.Height()] = struct{}{} + } + // see if there is any evidence that we were expecting but didn't see + for height, misbehavior := range node.Misbehaviors { + _, ok := seenEvidence[height] + require.True(t, ok, "expected evidence for %v misbehavior at height %v by node but was never found", + misbehavior, height) + } + }) +} diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index 2847a18b18..50c52409e4 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -3,8 +3,9 @@ package e2e_test import ( "testing" - e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" "github.com/stretchr/testify/require" + + e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" ) // Tests that all nodes have peered with each other, regardless of discovery method. diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 7107777416..19e117eb8f 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -4,15 +4,20 @@ import ( "bytes" "testing" + "github.com/stretchr/testify/require" + e2e "github.com/lazyledger/lazyledger-core/test/e2e/pkg" "github.com/lazyledger/lazyledger-core/types" - "github.com/stretchr/testify/require" ) // Tests that validator sets are available and correct according to // scheduled validator updates. 
func TestValidator_Sets(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -55,7 +60,7 @@ func TestValidator_Propose(t *testing.T) { if node.Mode != e2e.ModeValidator { return } - address := node.Key.PubKey().Address() + address := node.PrivvalKey.PubKey().Address() valSchedule := newValidatorSchedule(*node.Testnet) expectCount := 0 @@ -85,7 +90,7 @@ func TestValidator_Sign(t *testing.T) { if node.Mode != e2e.ModeValidator { return } - address := node.Key.PubKey().Address() + address := node.PrivvalKey.PubKey().Address() valSchedule := newValidatorSchedule(*node.Testnet) expectCount := 0 @@ -155,7 +160,7 @@ func (s *validatorSchedule) Increment(heights int64) { func makeVals(valMap map[*e2e.Node]int64) []*types.Validator { vals := make([]*types.Validator, 0, len(valMap)) for node, power := range valMap { - vals = append(vals, types.NewValidator(node.Key.PubKey(), power)) + vals = append(vals, types.NewValidator(node.PrivvalKey.PubKey(), power)) } return vals } diff --git a/test/maverick/README.md b/test/maverick/README.md new file mode 100644 index 0000000000..3082755364 --- /dev/null +++ b/test/maverick/README.md @@ -0,0 +1,51 @@ +# Maverick + +![](https://assets.rollingstone.com/assets/2015/article/tom-cruise-to-fight-drones-in-top-gun-sequel-20150629/201166/large_rect/1435581755/1401x788-Top-Gun-3.jpg) + +A byzantine node used to test Tendermint consensus against a plethora of different faulty misbehaviors. Designed to easily create new faulty misbehaviors to examine how a Tendermint network reacts to the misbehavior. Can also be used for fuzzy testing with different network arrangements. 
+ +## Misbehaviors + +A misbehavior allows control at the following stages as highlighted by the struct below + +```go +type Misbehavior struct { + String string + + EnterPropose func(cs *State, height int64, round int32) + + EnterPrevote func(cs *State, height int64, round int32) + + EnterPrecommit func(cs *State, height int64, round int32) + + ReceivePrevote func(cs *State, prevote *types.Vote) + + ReceivePrecommit func(cs *State, precommit *types.Vote) + + ReceiveProposal func(cs *State, proposal *types.Proposal) error +} +``` + +At each of these events, the node can exhibit a different misbehavior. To create a new misbehavior define a function that builds off the existing default misbehavior and then overrides one or more of these functions. Then append it to the misbehaviors list so the node recognizes it like so: + +```go +var MisbehaviorList = map[string]Misbehavior{ + "double-prevote": DoublePrevoteMisbehavior(), +} +``` + +## Setup + +The maverick node takes most of the functionality from the existing Tendermint CLI. To install this, in the directory of this readme, run: + +```bash +go build +``` + +Use `maverick init` to initialize a single node and `maverick node` to run it. This will run it normally unless you use the misbehaviors flag as follows: + +```bash +maverick node --proxy_app persistent_kvstore --misbehaviors double-vote,10 +``` + +This would cause the node to vote twice in every round at height 10. To add more misbehaviors at different heights, append the next misbehavior and height after the first (with comma separation). 
diff --git a/test/maverick/consensus/metrics.go b/test/maverick/consensus/metrics.go new file mode 100644 index 0000000000..bbd823a3fc --- /dev/null +++ b/test/maverick/consensus/metrics.go @@ -0,0 +1,220 @@ +package consensus + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + + prometheus "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this + // package. + MetricsSubsystem = "consensus" +) + +// Metrics contains metrics exposed by this package. +type Metrics struct { + // Height of the chain. + Height metrics.Gauge + + // ValidatorLastSignedHeight of a validator. + ValidatorLastSignedHeight metrics.Gauge + + // Number of rounds. + Rounds metrics.Gauge + + // Number of validators. + Validators metrics.Gauge + // Total power of all validators. + ValidatorsPower metrics.Gauge + // Power of a validator. + ValidatorPower metrics.Gauge + // Amount of blocks missed by a validator. + ValidatorMissedBlocks metrics.Gauge + // Number of validators who did not sign. + MissingValidators metrics.Gauge + // Total power of the missing validators. + MissingValidatorsPower metrics.Gauge + // Number of validators who tried to double sign. + ByzantineValidators metrics.Gauge + // Total power of the byzantine validators. + ByzantineValidatorsPower metrics.Gauge + + // Time between this and the last block. + BlockIntervalSeconds metrics.Histogram + + // Number of transactions. + NumTxs metrics.Gauge + // Size of the block. + BlockSizeBytes metrics.Gauge + // Total number of transactions. + TotalTxs metrics.Gauge + // The latest block height. + CommittedHeight metrics.Gauge + // Whether or not a node is fast syncing. 1 if yes, 0 if no. + FastSyncing metrics.Gauge + // Whether or not a node is state syncing. 1 if yes, 0 if no. 
+ StateSyncing metrics.Gauge + + // Number of blockparts transmitted by peer. + BlockParts metrics.Counter +} + +// PrometheusMetrics returns Metrics build using Prometheus client library. +// Optionally, labels can be provided along with their values ("foo", +// "fooValue"). +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "height", + Help: "Height of the chain.", + }, labels).With(labelsAndValues...), + Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rounds", + Help: "Number of rounds.", + }, labels).With(labelsAndValues...), + + Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators", + Help: "Number of validators.", + }, labels).With(labelsAndValues...), + ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_last_signed_height", + Help: "Last signed height for a validator", + }, append(labels, "validator_address")).With(labelsAndValues...), + ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_missed_blocks", + Help: "Total missed blocks for a validator", + }, append(labels, "validator_address")).With(labelsAndValues...), + ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators_power", + Help: "Total power of all validators.", + }, labels).With(labelsAndValues...), + ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: 
namespace, + Subsystem: MetricsSubsystem, + Name: "validator_power", + Help: "Power of a validator", + }, append(labels, "validator_address")).With(labelsAndValues...), + MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators", + Help: "Number of validators who did not sign.", + }, labels).With(labelsAndValues...), + MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators_power", + Help: "Total power of the missing validators.", + }, labels).With(labelsAndValues...), + ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators", + Help: "Number of validators who tried to double sign.", + }, labels).With(labelsAndValues...), + ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators_power", + Help: "Total power of the byzantine validators.", + }, labels).With(labelsAndValues...), + BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_interval_seconds", + Help: "Time between this and the last block.", + }, labels).With(labelsAndValues...), + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_txs", + Help: "Number of transactions.", + }, labels).With(labelsAndValues...), + BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_size_bytes", + Help: "Size of the block.", + }, labels).With(labelsAndValues...), + TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: 
"total_txs", + Help: "Total number of transactions.", + }, labels).With(labelsAndValues...), + CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "latest_block_height", + Help: "The latest block height.", + }, labels).With(labelsAndValues...), + FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "fast_syncing", + Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "state_syncing", + Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_parts", + Help: "Number of blockparts transmitted by peer.", + }, append(labels, "peer_id")).With(labelsAndValues...), + } +} + +// NopMetrics returns no-op Metrics. 
+func NopMetrics() *Metrics { + return &Metrics{ + Height: discard.NewGauge(), + + ValidatorLastSignedHeight: discard.NewGauge(), + + Rounds: discard.NewGauge(), + + Validators: discard.NewGauge(), + ValidatorsPower: discard.NewGauge(), + ValidatorPower: discard.NewGauge(), + ValidatorMissedBlocks: discard.NewGauge(), + MissingValidators: discard.NewGauge(), + MissingValidatorsPower: discard.NewGauge(), + ByzantineValidators: discard.NewGauge(), + ByzantineValidatorsPower: discard.NewGauge(), + + BlockIntervalSeconds: discard.NewHistogram(), + + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewGauge(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + FastSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), + } +} diff --git a/test/maverick/consensus/misbehavior.go b/test/maverick/consensus/misbehavior.go new file mode 100644 index 0000000000..ee16e698de --- /dev/null +++ b/test/maverick/consensus/misbehavior.go @@ -0,0 +1,398 @@ +package consensus + +import ( + "fmt" + + cstypes "github.com/lazyledger/lazyledger-core/consensus/types" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" + "github.com/lazyledger/lazyledger-core/types" +) + +// MisbehaviorList encompasses a list of all possible behaviors +var MisbehaviorList = map[string]Misbehavior{ + "double-prevote": DoublePrevoteMisbehavior(), +} + +type Misbehavior struct { + Name string + + EnterPropose func(cs *State, height int64, round int32) + + EnterPrevote func(cs *State, height int64, round int32) + + EnterPrecommit func(cs *State, height int64, round int32) + + ReceivePrevote func(cs *State, prevote *types.Vote) + + ReceivePrecommit func(cs *State, precommit *types.Vote) + + ReceiveProposal func(cs *State, proposal *types.Proposal) error +} + +// BEHAVIORS + +func DefaultMisbehavior() Misbehavior { + return Misbehavior{ + Name: "default", + EnterPropose: defaultEnterPropose, + 
EnterPrevote: defaultEnterPrevote, + EnterPrecommit: defaultEnterPrecommit, + ReceivePrevote: defaultReceivePrevote, + ReceivePrecommit: defaultReceivePrecommit, + ReceiveProposal: defaultReceiveProposal, + } +} + +// DoublePrevoteMisbehavior will make a node prevote both nil and a block in the same +// height and round. +func DoublePrevoteMisbehavior() Misbehavior { + b := DefaultMisbehavior() + b.Name = "double-prevote" + b.EnterPrevote = func(cs *State, height int64, round int32) { + + // If a block is locked, prevote that. + if cs.LockedBlock != nil { + cs.Logger.Info("enterPrevote: Already locked on a block, prevoting locked block") + cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + return + } + + // If ProposalBlock is nil, prevote nil. + if cs.ProposalBlock == nil { + cs.Logger.Info("enterPrevote: ProposalBlock is nil") + cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + // Validate proposal block + err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) + if err != nil { + // ProposalBlock is invalid, prevote nil. 
+ cs.Logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) + cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + if cs.sw == nil { + cs.Logger.Error("nil switch") + return + } + + prevote, err := cs.signVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + if err != nil { + cs.Logger.Error("enterPrevote: Unable to sign block", "err", err) + } + + nilPrevote, err := cs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + if err != nil { + cs.Logger.Error("enterPrevote: Unable to sign block", "err", err) + } + + // add our own vote + cs.sendInternalMessage(msgInfo{&VoteMessage{prevote}, ""}) + + cs.Logger.Info("Sending conflicting votes") + peers := cs.sw.Peers().List() + // there has to be at least two other peers connected else this behavior works normally + for idx, peer := range peers { + if idx%2 == 0 { // sign the proposal block + peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote})) + } else { // sign a nil block + peer.Send(VoteChannel, MustEncode(&VoteMessage{nilPrevote})) + } + } + } + return b +} + +// DEFAULTS + +func defaultEnterPropose(cs *State, height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + // If we don't get the proposal and all block parts quick enough, enterPrevote + cs.scheduleTimeout(cs.config.Propose(round), height, round, cstypes.RoundStepPropose) + + // Nothing more to do if we're not a validator + if cs.privValidator == nil { + logger.Debug("This node is not a validator") + return + } + logger.Debug("This node is a validator") + + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // If this node is a validator & proposer in the currentx round, it will + // miss the opportunity to create a block. 
+ logger.Error("Error on retrival of pubkey", "err", err) + return + } + address := pubKey.Address() + + // if not a validator, we're done + if !cs.Validators.HasAddress(address) { + logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators) + return + } + + if cs.isProposer(address) { + logger.Info("enterPropose: Our turn to propose", + "proposer", + address, + "privValidator", + cs.privValidator) + cs.decideProposal(height, round) + } else { + logger.Info("enterPropose: Not our turn to propose", + "proposer", + cs.Validators.GetProposer().Address, + "privValidator", + cs.privValidator) + } +} + +func defaultEnterPrevote(cs *State, height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + // If a block is locked, prevote that. + if cs.LockedBlock != nil { + logger.Info("enterPrevote: Already locked on a block, prevoting locked block") + cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + return + } + + // If ProposalBlock is nil, prevote nil. + if cs.ProposalBlock == nil { + logger.Info("enterPrevote: ProposalBlock is nil") + cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + // Validate proposal block + err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) + if err != nil { + // ProposalBlock is invalid, prevote nil. 
+ logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) + cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + // Prevote cs.ProposalBlock + // NOTE: the proposal signature is validated when it is received, + // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) + logger.Info("enterPrevote: ProposalBlock is valid") + cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) +} + +func defaultEnterPrecommit(cs *State, height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + // check for a polka + blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority() + + // If we don't have a polka, we must precommit nil. + if !ok { + if cs.LockedBlock != nil { + logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil") + } else { + logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.") + } + cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + return + } + + // At this point +2/3 prevoted for a particular block or nil. + _ = cs.eventBus.PublishEventPolka(cs.RoundStateEvent()) + + // the latest POLRound should be this round. + polRound, _ := cs.Votes.POLInfo() + if polRound < round { + panic(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound)) + } + + // +2/3 prevoted nil. Unlock and precommit nil. + if len(blockID.Hash) == 0 { + if cs.LockedBlock == nil { + logger.Info("enterPrecommit: +2/3 prevoted for nil.") + } else { + logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking") + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + _ = cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + } + cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + return + } + + // At this point, +2/3 prevoted for a particular block. 
+ + // If we're already locked on that block, precommit it, and update the LockedRound + if cs.LockedBlock.HashesTo(blockID.Hash) { + logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking") + cs.LockedRound = round + _ = cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) + cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + return + } + + // If +2/3 prevoted for proposal block, stage and precommit it + if cs.ProposalBlock.HashesTo(blockID.Hash) { + logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) + // Validate the block. + if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { + panic(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) + } + cs.LockedRound = round + cs.LockedBlock = cs.ProposalBlock + cs.LockedBlockParts = cs.ProposalBlockParts + _ = cs.eventBus.PublishEventLock(cs.RoundStateEvent()) + cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + return + } + + // There was a polka in this round for a block we don't have. + // Fetch that block, unlock, and precommit nil. + // The +2/3 prevotes for this round is the POL for our unlock. + logger.Info("enterPrecommit: +2/3 prevotes for a block we don't have. Voting nil", "blockID", blockID) + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { + cs.ProposalBlock = nil + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + } + _ = cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) +} + +func defaultReceivePrevote(cs *State, vote *types.Vote) { + height := cs.Height + prevotes := cs.Votes.Prevotes(vote.Round) + + // If +2/3 prevotes for a block or nil for *any* round: + if blockID, ok := prevotes.TwoThirdsMajority(); ok { + + // There was a polka! 
+ // If we're locked but this is a recent polka, unlock. + // If it matches our ProposalBlock, update the ValidBlock + + // Unlock if `cs.LockedRound < vote.Round <= cs.Round` + // NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round + if (cs.LockedBlock != nil) && + (cs.LockedRound < vote.Round) && + (vote.Round <= cs.Round) && + !cs.LockedBlock.HashesTo(blockID.Hash) { + + cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round) + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + _ = cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + } + + // Update Valid* if we can. + // NOTE: our proposal block may be nil or not what received a polka.. + if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) { + + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info( + "Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) + cs.ValidRound = vote.Round + cs.ValidBlock = cs.ProposalBlock + cs.ValidBlockParts = cs.ProposalBlockParts + } else { + cs.Logger.Info( + "Valid block we don't know about. Set ProposalBlock=nil", + "proposal", cs.ProposalBlock.Hash(), "blockID", blockID.Hash) + // We're getting the wrong block. 
+ cs.ProposalBlock = nil + } + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + } + cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) + _ = cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) + } + } + + // If +2/3 prevotes for *anything* for future round: + switch { + case cs.Round < vote.Round && prevotes.HasTwoThirdsAny(): + // Round-skip if there is any 2/3+ of votes ahead of us + cs.enterNewRound(height, vote.Round) + case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round + blockID, ok := prevotes.TwoThirdsMajority() + if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) { + cs.enterPrecommit(height, vote.Round) + } else if prevotes.HasTwoThirdsAny() { + cs.enterPrevoteWait(height, vote.Round) + } + case cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round: + // If the proposal is now complete, enter prevote of cs.Round. 
+ if cs.isProposalComplete() { + cs.enterPrevote(height, cs.Round) + } + } + +} + +func defaultReceivePrecommit(cs *State, vote *types.Vote) { + height := cs.Height + precommits := cs.Votes.Precommits(vote.Round) + cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) + + blockID, ok := precommits.TwoThirdsMajority() + if ok { + // Executed as TwoThirdsMajority could be from a higher round + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + if len(blockID.Hash) != 0 { + cs.enterCommit(height, vote.Round) + if cs.config.SkipTimeoutCommit && precommits.HasAll() { + cs.enterNewRound(cs.Height, 0) + } + } else { + cs.enterPrecommitWait(height, vote.Round) + } + } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { + cs.enterNewRound(height, vote.Round) + cs.enterPrecommitWait(height, vote.Round) + } +} + +func defaultReceiveProposal(cs *State, proposal *types.Proposal) error { + // Already have one + // TODO: possibly catch double proposals + if cs.Proposal != nil { + return nil + } + + // Does not apply + if proposal.Height != cs.Height || proposal.Round != cs.Round { + return nil + } + + // Verify POLRound, which must be -1 or in range [0, proposal.Round). + if proposal.POLRound < -1 || + (proposal.POLRound >= 0 && proposal.POLRound >= proposal.Round) { + return ErrInvalidProposalPOLRound + } + + p := proposal.ToProto() + // Verify signature + if !cs.Validators.GetProposer().PubKey.VerifySignature( + types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature) { + return ErrInvalidProposalSignature + } + + proposal.Signature = p.Signature + cs.Proposal = proposal + // We don't update cs.ProposalBlockParts if it is already set. + // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. + // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! 
+ if cs.ProposalBlockParts == nil { + cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) + } + cs.Logger.Info("Received proposal", "proposal", proposal) + return nil +} diff --git a/test/maverick/consensus/msgs.go b/test/maverick/consensus/msgs.go new file mode 100644 index 0000000000..198fddaba8 --- /dev/null +++ b/test/maverick/consensus/msgs.go @@ -0,0 +1,387 @@ +package consensus + +import ( + "errors" + "fmt" + + "github.com/gogo/protobuf/proto" + + cstypes "github.com/lazyledger/lazyledger-core/consensus/types" + "github.com/lazyledger/lazyledger-core/libs/bits" + tmmath "github.com/lazyledger/lazyledger-core/libs/math" + "github.com/lazyledger/lazyledger-core/p2p" + tmcons "github.com/lazyledger/lazyledger-core/proto/tendermint/consensus" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" + "github.com/lazyledger/lazyledger-core/types" +) + +// MsgToProto takes a consensus message type and returns the proto defined consensus message +func MsgToProto(msg Message) (*tmcons.Message, error) { + if msg == nil { + return nil, errors.New("consensus: message is nil") + } + var pb tmcons.Message + + switch msg := msg.(type) { + case *NewRoundStepMessage: + pb = tmcons.Message{ + Sum: &tmcons.Message_NewRoundStep{ + NewRoundStep: &tmcons.NewRoundStep{ + Height: msg.Height, + Round: msg.Round, + Step: uint32(msg.Step), + SecondsSinceStartTime: msg.SecondsSinceStartTime, + LastCommitRound: msg.LastCommitRound, + }, + }, + } + case *NewValidBlockMessage: + pbPartSetHeader := msg.BlockPartSetHeader.ToProto() + pbBits := msg.BlockParts.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_NewValidBlock{ + NewValidBlock: &tmcons.NewValidBlock{ + Height: msg.Height, + Round: msg.Round, + BlockPartSetHeader: pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.IsCommit, + }, + }, + } + case *ProposalMessage: + pbP := msg.Proposal.ToProto() + pb = 
tmcons.Message{ + Sum: &tmcons.Message_Proposal{ + Proposal: &tmcons.Proposal{ + Proposal: *pbP, + }, + }, + } + case *ProposalPOLMessage: + pbBits := msg.ProposalPOL.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_ProposalPol{ + ProposalPol: &tmcons.ProposalPOL{ + Height: msg.Height, + ProposalPolRound: msg.ProposalPOLRound, + ProposalPol: *pbBits, + }, + }, + } + case *BlockPartMessage: + parts, err := msg.Part.ToProto() + if err != nil { + return nil, fmt.Errorf("msg to proto error: %w", err) + } + pb = tmcons.Message{ + Sum: &tmcons.Message_BlockPart{ + BlockPart: &tmcons.BlockPart{ + Height: msg.Height, + Round: msg.Round, + Part: *parts, + }, + }, + } + case *VoteMessage: + vote := msg.Vote.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_Vote{ + Vote: &tmcons.Vote{ + Vote: vote, + }, + }, + } + case *HasVoteMessage: + pb = tmcons.Message{ + Sum: &tmcons.Message_HasVote{ + HasVote: &tmcons.HasVote{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + Index: msg.Index, + }, + }, + } + case *VoteSetMaj23Message: + bi := msg.BlockID.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_VoteSetMaj23{ + VoteSetMaj23: &tmcons.VoteSetMaj23{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, + }, + }, + } + case *VoteSetBitsMessage: + bi := msg.BlockID.ToProto() + bits := msg.Votes.ToProto() + + vsb := &tmcons.Message_VoteSetBits{ + VoteSetBits: &tmcons.VoteSetBits{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, + }, + } + + if bits != nil { + vsb.VoteSetBits.Votes = *bits + } + + pb = tmcons.Message{ + Sum: vsb, + } + + default: + return nil, fmt.Errorf("consensus: message not recognized: %T", msg) + } + + return &pb, nil +} + +// MsgFromProto takes a consensus proto message and returns the native go type +func MsgFromProto(msg *tmcons.Message) (Message, error) { + if msg == nil { + return nil, errors.New("consensus: nil message") + } + var pb Message + + switch msg := msg.Sum.(type) { + 
case *tmcons.Message_NewRoundStep: + rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step)) + // deny message based on possible overflow + if err != nil { + return nil, fmt.Errorf("denying message due to possible overflow: %w", err) + } + pb = &NewRoundStepMessage{ + Height: msg.NewRoundStep.Height, + Round: msg.NewRoundStep.Round, + Step: cstypes.RoundStepType(rs), + SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime, + LastCommitRound: msg.NewRoundStep.LastCommitRound, + } + case *tmcons.Message_NewValidBlock: + pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) + if err != nil { + return nil, fmt.Errorf("parts header to proto error: %w", err) + } + + pbBits := new(bits.BitArray) + err = pbBits.FromProto(msg.NewValidBlock.BlockParts) + if err != nil { + return nil, fmt.Errorf("parts to proto error: %w", err) + } + + pb = &NewValidBlockMessage{ + Height: msg.NewValidBlock.Height, + Round: msg.NewValidBlock.Round, + BlockPartSetHeader: *pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.NewValidBlock.IsCommit, + } + case *tmcons.Message_Proposal: + pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal) + if err != nil { + return nil, fmt.Errorf("proposal msg to proto error: %w", err) + } + + pb = &ProposalMessage{ + Proposal: pbP, + } + case *tmcons.Message_ProposalPol: + pbBits := new(bits.BitArray) + err := pbBits.FromProto(&msg.ProposalPol.ProposalPol) + if err != nil { + return nil, fmt.Errorf("proposal PoL to proto error: %w", err) + } + + pb = &ProposalPOLMessage{ + Height: msg.ProposalPol.Height, + ProposalPOLRound: msg.ProposalPol.ProposalPolRound, + ProposalPOL: pbBits, + } + case *tmcons.Message_BlockPart: + parts, err := types.PartFromProto(&msg.BlockPart.Part) + if err != nil { + return nil, fmt.Errorf("blockpart msg to proto error: %w", err) + } + pb = &BlockPartMessage{ + Height: msg.BlockPart.Height, + Round: msg.BlockPart.Round, + Part: parts, + } + case *tmcons.Message_Vote: + 
vote, err := types.VoteFromProto(msg.Vote.Vote) + if err != nil { + return nil, fmt.Errorf("vote msg to proto error: %w", err) + } + + pb = &VoteMessage{ + Vote: vote, + } + case *tmcons.Message_HasVote: + pb = &HasVoteMessage{ + Height: msg.HasVote.Height, + Round: msg.HasVote.Round, + Type: msg.HasVote.Type, + Index: msg.HasVote.Index, + } + case *tmcons.Message_VoteSetMaj23: + bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID) + if err != nil { + return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err) + } + pb = &VoteSetMaj23Message{ + Height: msg.VoteSetMaj23.Height, + Round: msg.VoteSetMaj23.Round, + Type: msg.VoteSetMaj23.Type, + BlockID: *bi, + } + case *tmcons.Message_VoteSetBits: + bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID) + if err != nil { + return nil, fmt.Errorf("block ID msg to proto error: %w", err) + } + bits := new(bits.BitArray) + err = bits.FromProto(&msg.VoteSetBits.Votes) + if err != nil { + return nil, fmt.Errorf("votes to proto error: %w", err) + } + + pb = &VoteSetBitsMessage{ + Height: msg.VoteSetBits.Height, + Round: msg.VoteSetBits.Round, + Type: msg.VoteSetBits.Type, + BlockID: *bi, + Votes: bits, + } + default: + return nil, fmt.Errorf("consensus: message not recognized: %T", msg) + } + + if err := pb.ValidateBasic(); err != nil { + return nil, err + } + + return pb, nil +} + +// MustEncode takes the reactors msg, makes it proto and marshals it +// this mimics `MustMarshalBinaryBare` in that is panics on error +func MustEncode(msg Message) []byte { + pb, err := MsgToProto(msg) + if err != nil { + panic(err) + } + enc, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + return enc +} + +// WALToProto takes a WAL message and return a proto walMessage and error +func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) { + var pb tmcons.WALMessage + + switch msg := msg.(type) { + case types.EventDataRoundState: + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_EventDataRoundState{ + 
EventDataRoundState: &tmproto.EventDataRoundState{ + Height: msg.Height, + Round: msg.Round, + Step: msg.Step, + }, + }, + } + case msgInfo: + consMsg, err := MsgToProto(msg.Msg) + if err != nil { + return nil, err + } + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_MsgInfo{ + MsgInfo: &tmcons.MsgInfo{ + Msg: *consMsg, + PeerID: string(msg.PeerID), + }, + }, + } + case timeoutInfo: + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_TimeoutInfo{ + TimeoutInfo: &tmcons.TimeoutInfo{ + Duration: msg.Duration, + Height: msg.Height, + Round: msg.Round, + Step: uint32(msg.Step), + }, + }, + } + case EndHeightMessage: + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_EndHeight{ + EndHeight: &tmcons.EndHeight{ + Height: msg.Height, + }, + }, + } + default: + return nil, fmt.Errorf("to proto: wal message not recognized: %T", msg) + } + + return &pb, nil +} + +// WALFromProto takes a proto wal message and return a consensus walMessage and error +func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) { + if msg == nil { + return nil, errors.New("nil WAL message") + } + var pb WALMessage + + switch msg := msg.Sum.(type) { + case *tmcons.WALMessage_EventDataRoundState: + pb = types.EventDataRoundState{ + Height: msg.EventDataRoundState.Height, + Round: msg.EventDataRoundState.Round, + Step: msg.EventDataRoundState.Step, + } + case *tmcons.WALMessage_MsgInfo: + walMsg, err := MsgFromProto(&msg.MsgInfo.Msg) + if err != nil { + return nil, fmt.Errorf("msgInfo from proto error: %w", err) + } + pb = msgInfo{ + Msg: walMsg, + PeerID: p2p.ID(msg.MsgInfo.PeerID), + } + + case *tmcons.WALMessage_TimeoutInfo: + tis, err := tmmath.SafeConvertUint8(int64(msg.TimeoutInfo.Step)) + // deny message based on possible overflow + if err != nil { + return nil, fmt.Errorf("denying message due to possible overflow: %w", err) + } + pb = timeoutInfo{ + Duration: msg.TimeoutInfo.Duration, + Height: msg.TimeoutInfo.Height, + Round: msg.TimeoutInfo.Round, + Step: cstypes.RoundStepType(tis), 
+ } + return pb, nil + case *tmcons.WALMessage_EndHeight: + pb := EndHeightMessage{ + Height: msg.EndHeight.Height, + } + return pb, nil + default: + return nil, fmt.Errorf("from proto: wal message not recognized: %T", msg) + } + return pb, nil +} diff --git a/test/maverick/consensus/reactor.go b/test/maverick/consensus/reactor.go new file mode 100644 index 0000000000..f937af63f4 --- /dev/null +++ b/test/maverick/consensus/reactor.go @@ -0,0 +1,1720 @@ +package consensus + +import ( + "errors" + "fmt" + "reflect" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + + cstypes "github.com/lazyledger/lazyledger-core/consensus/types" + "github.com/lazyledger/lazyledger-core/libs/bits" + tmevents "github.com/lazyledger/lazyledger-core/libs/events" + tmjson "github.com/lazyledger/lazyledger-core/libs/json" + "github.com/lazyledger/lazyledger-core/libs/log" + tmsync "github.com/lazyledger/lazyledger-core/libs/sync" + "github.com/lazyledger/lazyledger-core/p2p" + tmcons "github.com/lazyledger/lazyledger-core/proto/tendermint/consensus" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" + sm "github.com/lazyledger/lazyledger-core/state" + "github.com/lazyledger/lazyledger-core/types" + tmtime "github.com/lazyledger/lazyledger-core/types/time" +) + +const ( + StateChannel = byte(0x20) + DataChannel = byte(0x21) + VoteChannel = byte(0x22) + VoteSetBitsChannel = byte(0x23) + + maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes. + + blocksToContributeToBecomeGoodPeer = 10000 + votesToContributeToBecomeGoodPeer = 10000 +) + +//----------------------------------------------------------------------------- + +// Reactor defines a reactor for the consensus service. 
+type Reactor struct { + p2p.BaseReactor // BaseService + p2p.Switch + + conS *State + + mtx tmsync.RWMutex + waitSync bool + eventBus *types.EventBus + + Metrics *Metrics +} + +type ReactorOption func(*Reactor) + +// NewReactor returns a new Reactor with the given +// consensusState. +func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { + conR := &Reactor{ + conS: consensusState, + waitSync: waitSync, + Metrics: NopMetrics(), + } + conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) + + for _, option := range options { + option(conR) + } + + return conR +} + +// OnStart implements BaseService by subscribing to events, which later will be +// broadcasted to other peers and starting state if we're not in fast sync. +func (conR *Reactor) OnStart() error { + conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync()) + + // start routine that computes peer statistics for evaluating peer quality + go conR.peerStatsRoutine() + + conR.subscribeToBroadcastEvents() + + if !conR.WaitSync() { + conR.conS.SetSwitch(conR.Switch) + err := conR.conS.Start() + if err != nil { + return err + } + } + + return nil +} + +// OnStop implements BaseService by unsubscribing from events and stopping +// state. +func (conR *Reactor) OnStop() { + conR.unsubscribeFromBroadcastEvents() + if err := conR.conS.Stop(); err != nil { + conR.Logger.Error("Error stopping consensus state", "err", err) + } + if !conR.WaitSync() { + conR.conS.Wait() + } +} + +// SwitchToConsensus switches from fast_sync mode to consensus mode. +// It resets the state, turns off fast_sync, and starts the consensus state-machine +func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { + conR.Logger.Info("SwitchToConsensus") + + // We have no votes, so reconstruct LastCommit from SeenCommit. 
+ if state.LastBlockHeight > 0 { + conR.conS.reconstructLastCommit(state) + } + + // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a + // NewRoundStepMessage. + conR.conS.updateToState(state) + + conR.mtx.Lock() + conR.waitSync = false + conR.mtx.Unlock() + conR.Metrics.FastSyncing.Set(0) + conR.Metrics.StateSyncing.Set(0) + + if skipWAL { + conR.conS.doWALCatchup = false + } + conR.conS.SetSwitch(conR.Switch) + err := conR.conS.Start() + if err != nil { + panic(fmt.Sprintf(`Failed to start consensus state: %v + +conS: +%+v + +conR: +%+v`, err, conR.conS, conR)) + } +} + +// GetChannels implements Reactor +func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { + // TODO optimize + return []*p2p.ChannelDescriptor{ + { + ID: StateChannel, + Priority: 5, + SendQueueCapacity: 100, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: DataChannel, // maybe split between gossiping current block and catchup stuff + // once we gossip the whole block there's nothing left to send until next height or round + Priority: 10, + SendQueueCapacity: 100, + RecvBufferCapacity: 50 * 4096, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: VoteChannel, + Priority: 5, + SendQueueCapacity: 100, + RecvBufferCapacity: 100 * 100, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: VoteSetBitsChannel, + Priority: 1, + SendQueueCapacity: 2, + RecvBufferCapacity: 1024, + RecvMessageCapacity: maxMsgSize, + }, + } +} + +// InitPeer implements Reactor by creating a state for the peer. +func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer { + peerState := NewPeerState(peer).SetLogger(conR.Logger) + peer.Set(types.PeerStateKey, peerState) + return peer +} + +// AddPeer implements Reactor by spawning multiple gossiping goroutines for the +// peer. 
+func (conR *Reactor) AddPeer(peer p2p.Peer) { + if !conR.IsRunning() { + return + } + + peerState, ok := peer.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("peer %v has no state", peer)) + } + // Begin routines for this peer. + go conR.gossipDataRoutine(peer, peerState) + go conR.gossipVotesRoutine(peer, peerState) + go conR.queryMaj23Routine(peer, peerState) + + // Send our state to peer. + // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). + if !conR.WaitSync() { + conR.sendNewRoundStepMessage(peer) + } +} + +// RemovePeer is a noop. +func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { + if !conR.IsRunning() { + return + } + // TODO + // ps, ok := peer.Get(PeerStateKey).(*PeerState) + // if !ok { + // panic(fmt.Sprintf("Peer %v has no state", peer)) + // } + // ps.Disconnect() +} + +// Receive implements Reactor +// NOTE: We process these messages even when we're fast_syncing. +// Messages affect either a peer state or the consensus state. 
+// Peer state updates can happen in parallel, but processing of +// proposals, block parts, and votes are ordered by the receiveRoutine +// NOTE: blocks on consensus state for proposals, block parts, and votes +func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { + if !conR.IsRunning() { + conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) + return + } + + msg, err := decodeMsg(msgBytes) + if err != nil { + conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) + conR.Switch.StopPeerForError(src, err) + return + } + + if err = msg.ValidateBasic(); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(src, err) + return + } + + conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) + + // Get peer states + ps, ok := src.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("Peer %v has no state", src)) + } + + switch chID { + case StateChannel: + switch msg := msg.(type) { + case *NewRoundStepMessage: + conR.conS.mtx.Lock() + initialHeight := conR.conS.state.InitialHeight + conR.conS.mtx.Unlock() + if err = msg.ValidateHeight(initialHeight); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(src, err) + return + } + ps.ApplyNewRoundStepMessage(msg) + case *NewValidBlockMessage: + ps.ApplyNewValidBlockMessage(msg) + case *HasVoteMessage: + ps.ApplyHasVoteMessage(msg) + case *VoteSetMaj23Message: + cs := conR.conS + cs.mtx.Lock() + height, votes := cs.Height, cs.Votes + cs.mtx.Unlock() + if height != msg.Height { + return + } + // Peer claims to have a maj23 for some BlockID at H,R,S, + err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID) + if err != nil { + conR.Switch.StopPeerForError(src, err) + return + } + // Respond with a VoteSetBitsMessage showing which votes we have. 
+ // (and consequently shows which we don't have) + var ourVotes *bits.BitArray + switch msg.Type { + case tmproto.PrevoteType: + ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) + case tmproto.PrecommitType: + ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) + default: + panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?") + } + src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: msg.BlockID, + Votes: ourVotes, + })) + default: + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + case DataChannel: + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) + return + } + switch msg := msg.(type) { + case *ProposalMessage: + ps.SetHasProposal(msg.Proposal) + conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + case *ProposalPOLMessage: + ps.ApplyProposalPOLMessage(msg) + case *BlockPartMessage: + ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) + conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) + conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + default: + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + case VoteChannel: + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) + return + } + switch msg := msg.(type) { + case *VoteMessage: + cs := conR.conS + cs.mtx.RLock() + height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() + cs.mtx.RUnlock() + ps.EnsureVoteBitArrays(height, valSize) + ps.EnsureVoteBitArrays(height-1, lastCommitSize) + ps.SetHasVote(msg.Vote) + + cs.peerMsgQueue <- msgInfo{msg, src.ID()} + + default: + // don't punish (leave room for soft upgrades) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + case VoteSetBitsChannel: + if 
conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) + return + } + switch msg := msg.(type) { + case *VoteSetBitsMessage: + cs := conR.conS + cs.mtx.Lock() + height, votes := cs.Height, cs.Votes + cs.mtx.Unlock() + + if height == msg.Height { + var ourVotes *bits.BitArray + switch msg.Type { + case tmproto.PrevoteType: + ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) + case tmproto.PrecommitType: + ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) + default: + panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?") + } + ps.ApplyVoteSetBitsMessage(msg, ourVotes) + } else { + ps.ApplyVoteSetBitsMessage(msg, nil) + } + default: + // don't punish (leave room for soft upgrades) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + default: + conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID)) + } +} + +// SetEventBus sets event bus. +func (conR *Reactor) SetEventBus(b *types.EventBus) { + conR.eventBus = b + conR.conS.SetEventBus(b) +} + +// WaitSync returns whether the consensus reactor is waiting for state/fast sync. +func (conR *Reactor) WaitSync() bool { + conR.mtx.RLock() + defer conR.mtx.RUnlock() + return conR.waitSync +} + +//-------------------------------------- + +// subscribeToBroadcastEvents subscribes for new round steps and votes +// using internal pubsub defined on state to broadcast +// them to peers upon receiving. 
+func (conR *Reactor) subscribeToBroadcastEvents() { + const subscriber = "consensus-reactor" + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, + func(data tmevents.EventData) { + conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState)) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } + + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock, + func(data tmevents.EventData) { + conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState)) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } + + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, + func(data tmevents.EventData) { + conR.broadcastHasVoteMessage(data.(*types.Vote)) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } + +} + +func (conR *Reactor) unsubscribeFromBroadcastEvents() { + const subscriber = "consensus-reactor" + conR.conS.evsw.RemoveListener(subscriber) +} + +func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { + nrsMsg := makeRoundStepMessage(rs) + conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg)) +} + +func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { + csMsg := &NewValidBlockMessage{ + Height: rs.Height, + Round: rs.Round, + BlockPartSetHeader: rs.ProposalBlockParts.Header(), + BlockParts: rs.ProposalBlockParts.BitArray(), + IsCommit: rs.Step == cstypes.RoundStepCommit, + } + conR.Switch.Broadcast(StateChannel, MustEncode(csMsg)) +} + +// Broadcasts HasVoteMessage to peers that care. +func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { + msg := &HasVoteMessage{ + Height: vote.Height, + Round: vote.Round, + Type: vote.Type, + Index: vote.ValidatorIndex, + } + conR.Switch.Broadcast(StateChannel, MustEncode(msg)) + /* + // TODO: Make this broadcast more selective. 
+ for _, peer := range conR.Switch.Peers().List() { + ps, ok := peer.Get(PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("Peer %v has no state", peer)) + } + prs := ps.GetRoundState() + if prs.Height == vote.Height { + // TODO: Also filter on round? + peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg}) + } else { + // Height doesn't match + // TODO: check a field, maybe CatchupCommitRound? + // TODO: But that requires changing the struct field comment. + } + } + */ +} + +func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) { + nrsMsg = &NewRoundStepMessage{ + Height: rs.Height, + Round: rs.Round, + Step: rs.Step, + SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), + LastCommitRound: rs.LastCommit.GetRound(), + } + return +} + +func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { + rs := conR.conS.GetRoundState() + nrsMsg := makeRoundStepMessage(rs) + peer.Send(StateChannel, MustEncode(nrsMsg)) +} + +func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) { + logger := conR.Logger.With("peer", peer) + +OUTER_LOOP: + for { + // Manage disconnects from self or peer. + if !peer.IsRunning() || !conR.IsRunning() { + logger.Info("Stopping gossipDataRoutine for peer") + return + } + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + + // Send proposal Block parts? + if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) { + if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { + part := rs.ProposalBlockParts.GetPart(index) + msg := &BlockPartMessage{ + Height: rs.Height, // This tells peer that this part applies to us. + Round: rs.Round, // This tells peer that this part applies to us. 
+ Part: part, + } + logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) + if peer.Send(DataChannel, MustEncode(msg)) { + ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + } + continue OUTER_LOOP + } + } + + // If the peer is on a previous height that we have, help catch up. + if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) { + heightLogger := logger.With("height", prs.Height) + + // if we never received the commit message from the peer, the block parts wont be initialized + if prs.ProposalBlockParts == nil { + blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) + if blockMeta == nil { + heightLogger.Error("Failed to load block meta", + "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + } else { + ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) + } + // continue the loop since prs is a copy and not effected by this initialization + continue OUTER_LOOP + } + conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer) + continue OUTER_LOOP + } + + // If height and round don't match, sleep. + if (rs.Height != prs.Height) || (rs.Round != prs.Round) { + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + continue OUTER_LOOP + } + + // By here, height and round match. + // Proposal block parts were already matched and sent if any were wanted. + // (These can match on hash so the round doesn't matter) + // Now consider sending other things, like the Proposal itself. + + // Send Proposal && ProposalPOL BitArray? + if rs.Proposal != nil && !prs.Proposal { + // Proposal: share the proposal metadata with peer. 
+ { + msg := &ProposalMessage{Proposal: rs.Proposal} + logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) + if peer.Send(DataChannel, MustEncode(msg)) { + // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! + ps.SetHasProposal(rs.Proposal) + } + } + // ProposalPOL: lets peer know which POL votes we have so far. + // Peer must receive ProposalMessage first. + // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, + // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). + if 0 <= rs.Proposal.POLRound { + msg := &ProposalPOLMessage{ + Height: rs.Height, + ProposalPOLRound: rs.Proposal.POLRound, + ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), + } + logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) + peer.Send(DataChannel, MustEncode(msg)) + } + continue OUTER_LOOP + } + + // Nothing to do. Sleep. + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + continue OUTER_LOOP + } +} + +func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) { + + if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { + // Ensure that the peer's PartSetHeader is correct + blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) + if blockMeta == nil { + logger.Error("Failed to load block meta", "ourHeight", rs.Height, + "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + return + } else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) { + logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping", + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + return + } + // Load the part + part := 
conR.conS.blockStore.LoadBlockPart(prs.Height, index) + if part == nil { + logger.Error("Could not load part", "index", index, + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + return + } + // Send the part + msg := &BlockPartMessage{ + Height: prs.Height, // Not our height, so it doesn't matter. + Round: prs.Round, // Not our height, so it doesn't matter. + Part: part, + } + logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) + if peer.Send(DataChannel, MustEncode(msg)) { + ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + } else { + logger.Debug("Sending block part for catchup failed") + } + return + } + time.Sleep(conR.conS.config.PeerGossipSleepDuration) +} + +func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { + logger := conR.Logger.With("peer", peer) + + // Simple hack to throttle logs upon sleep. + var sleeping = 0 + +OUTER_LOOP: + for { + // Manage disconnects from self or peer. + if !peer.IsRunning() || !conR.IsRunning() { + logger.Info("Stopping gossipVotesRoutine for peer") + return + } + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + + switch sleeping { + case 1: // First sleep + sleeping = 2 + case 2: // No more sleep + sleeping = 0 + } + + // If height matches, then send LastCommit, Prevotes, Precommits. + if rs.Height == prs.Height { + heightLogger := logger.With("height", prs.Height) + if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) { + continue OUTER_LOOP + } + } + + // Special catchup logic. + // If peer is lagging by height 1, send LastCommit. + if prs.Height != 0 && rs.Height == prs.Height+1 { + if ps.PickSendVote(rs.LastCommit) { + logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) + continue OUTER_LOOP + } + } + + // Catchup logic + // If peer is lagging by more than 1, send Commit. 
+ if prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= conR.conS.blockStore.Base() { + // Load the block commit for prs.Height, + // which contains precommit signatures for prs.Height. + if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil { + if ps.PickSendVote(commit) { + logger.Debug("Picked Catchup commit to send", "height", prs.Height) + continue OUTER_LOOP + } + } + } + + if sleeping == 0 { + // We sent nothing. Sleep... + sleeping = 1 + logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height, + "localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes, + "localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits) + } else if sleeping == 2 { + // Continued sleep... + sleeping = 1 + } + + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + continue OUTER_LOOP + } +} + +func (conR *Reactor) gossipVotesForHeight( + logger log.Logger, + rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, + ps *PeerState, +) bool { + + // If there are lastCommits to send... + if prs.Step == cstypes.RoundStepNewHeight { + if ps.PickSendVote(rs.LastCommit) { + logger.Debug("Picked rs.LastCommit to send") + return true + } + } + // If there are POL prevotes to send... + if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { + if ps.PickSendVote(polPrevotes) { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return true + } + } + } + // If there are prevotes to send... + if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { + if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are precommits to send... 
+ if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { + if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) { + logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are prevotes to send...Needed because of validBlock mechanism + if prs.Round != -1 && prs.Round <= rs.Round { + if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are POLPrevotes to send... + if prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { + if ps.PickSendVote(polPrevotes) { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return true + } + } + } + + return false +} + +// NOTE: `queryMaj23Routine` has a simple crude design since it only comes +// into play for liveness when there's a signature DDoS attack happening. +func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) { + logger := conR.Logger.With("peer", peer) + +OUTER_LOOP: + for { + // Manage disconnects from self or peer. 
+ if !peer.IsRunning() || !conR.IsRunning() { + logger.Info("Stopping queryMaj23Routine for peer") + return + } + + // Maybe send Height/Round/Prevotes + { + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + if rs.Height == prs.Height { + if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrevoteType, + BlockID: maj23, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + // Maybe send Height/Round/Precommits + { + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + if rs.Height == prs.Height { + if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrecommitType, + BlockID: maj23, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + // Maybe send Height/Round/ProposalPOL + { + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { + if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: prs.ProposalPOLRound, + Type: tmproto.PrevoteType, + BlockID: maj23, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + // Little point sending LastCommitRound/LastCommit, + // These are fleeting and non-blocking. + + // Maybe send Height/CatchupCommitRound/CatchupCommit. 
+ { + prs := ps.GetRoundState() + if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && + prs.Height >= conR.conS.blockStore.Base() { + if commit := conR.conS.LoadCommit(prs.Height); commit != nil { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: commit.Round, + Type: tmproto.PrecommitType, + BlockID: commit.BlockID, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + + continue OUTER_LOOP + } +} + +func (conR *Reactor) peerStatsRoutine() { + for { + if !conR.IsRunning() { + conR.Logger.Info("Stopping peerStatsRoutine") + return + } + + select { + case msg := <-conR.conS.statsMsgQueue: + // Get peer + peer := conR.Switch.Peers().Get(msg.PeerID) + if peer == nil { + conR.Logger.Debug("Attempt to update stats for non-existent peer", + "peer", msg.PeerID) + continue + } + // Get peer state + ps, ok := peer.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("Peer %v has no state", peer)) + } + switch msg.Msg.(type) { + case *VoteMessage: + if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 { + conR.Switch.MarkPeerAsGood(peer) + } + case *BlockPartMessage: + if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 { + conR.Switch.MarkPeerAsGood(peer) + } + } + case <-conR.conS.Quit(): + return + + case <-conR.Quit(): + return + } + } +} + +// String returns a string representation of the Reactor. +// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables. +// TODO: improve! 
+func (conR *Reactor) String() string { + // better not to access shared variables + return "ConsensusReactor" // conR.StringIndented("") +} + +// StringIndented returns an indented string representation of the Reactor +func (conR *Reactor) StringIndented(indent string) string { + s := "ConsensusReactor{\n" + s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n" + for _, peer := range conR.Switch.Peers().List() { + ps, ok := peer.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("Peer %v has no state", peer)) + } + s += indent + " " + ps.StringIndented(indent+" ") + "\n" + } + s += indent + "}" + return s +} + +// ReactorMetrics sets the metrics +func ReactorMetrics(metrics *Metrics) ReactorOption { + return func(conR *Reactor) { conR.Metrics = metrics } +} + +//----------------------------------------------------------------------------- + +var ( + ErrPeerStateHeightRegression = errors.New("error peer state height regression") + ErrPeerStateInvalidStartTime = errors.New("error peer state invalid startTime") +) + +// PeerState contains the known state of a peer, including its connection and +// threadsafe access to its PeerRoundState. +// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go. +// Be mindful of what you Expose. +type PeerState struct { + peer p2p.Peer + logger log.Logger + + mtx sync.Mutex // NOTE: Modify below using setters, never directly. + PRS cstypes.PeerRoundState `json:"round_state"` // Exposed. + Stats *peerStateStats `json:"stats"` // Exposed. +} + +// peerStateStats holds internal statistics for a peer. 
+type peerStateStats struct { + Votes int `json:"votes"` + BlockParts int `json:"block_parts"` +} + +func (pss peerStateStats) String() string { + return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}", + pss.Votes, pss.BlockParts) +} + +// NewPeerState returns a new PeerState for the given Peer +func NewPeerState(peer p2p.Peer) *PeerState { + return &PeerState{ + peer: peer, + logger: log.NewNopLogger(), + PRS: cstypes.PeerRoundState{ + Round: -1, + ProposalPOLRound: -1, + LastCommitRound: -1, + CatchupCommitRound: -1, + }, + Stats: &peerStateStats{}, + } +} + +// SetLogger allows to set a logger on the peer state. Returns the peer state +// itself. +func (ps *PeerState) SetLogger(logger log.Logger) *PeerState { + ps.logger = logger + return ps +} + +// GetRoundState returns an shallow copy of the PeerRoundState. +// There's no point in mutating it since it won't change PeerState. +func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + prs := ps.PRS // copy + return &prs +} + +// ToJSON returns a json of PeerState. +func (ps *PeerState) ToJSON() ([]byte, error) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return tmjson.Marshal(ps) +} + +// GetHeight returns an atomic snapshot of the PeerRoundState's height +// used by the mempool to ensure peers are caught up before broadcasting new txs +func (ps *PeerState) GetHeight() int64 { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return ps.PRS.Height +} + +// SetHasProposal sets the given proposal as known for the peer. 
+func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round { + return + } + + if ps.PRS.Proposal { + return + } + + ps.PRS.Proposal = true + + // ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage + if ps.PRS.ProposalBlockParts != nil { + return + } + + ps.PRS.ProposalBlockPartSetHeader = proposal.BlockID.PartSetHeader + ps.PRS.ProposalBlockParts = bits.NewBitArray(int(proposal.BlockID.PartSetHeader.Total)) + ps.PRS.ProposalPOLRound = proposal.POLRound + ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received. +} + +// InitProposalBlockParts initializes the peer's proposal block parts header and bit array. +func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.ProposalBlockParts != nil { + return + } + + ps.PRS.ProposalBlockPartSetHeader = partSetHeader + ps.PRS.ProposalBlockParts = bits.NewBitArray(int(partSetHeader.Total)) +} + +// SetHasProposalBlockPart sets the given block part index as known for the peer. +func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index int) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != height || ps.PRS.Round != round { + return + } + + ps.PRS.ProposalBlockParts.SetIndex(index, true) +} + +// PickSendVote picks a vote and sends it to the peer. +// Returns true if vote was sent. +func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { + if vote, ok := ps.PickVoteToSend(votes); ok { + msg := &VoteMessage{vote} + ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) + if ps.peer.Send(VoteChannel, MustEncode(msg)) { + ps.SetHasVote(vote) + return true + } + return false + } + return false +} + +// PickVoteToSend picks a vote to send to the peer. +// Returns true if a vote was picked. +// NOTE: `votes` must be the correct Size() for the Height(). 
+func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if votes.Size() == 0 { + return nil, false + } + + height, round, votesType, size := + votes.GetHeight(), votes.GetRound(), tmproto.SignedMsgType(votes.Type()), votes.Size() + + // Lazily set data using 'votes'. + if votes.IsCommit() { + ps.ensureCatchupCommitRound(height, round, size) + } + ps.ensureVoteBitArrays(height, size) + + psVotes := ps.getVoteBitArray(height, round, votesType) + if psVotes == nil { + return nil, false // Not something worth sending + } + if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { + return votes.GetByIndex(int32(index)), true + } + return nil, false +} + +func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray { + if !types.IsVoteTypeValid(votesType) { + return nil + } + + if ps.PRS.Height == height { + if ps.PRS.Round == round { + switch votesType { + case tmproto.PrevoteType: + return ps.PRS.Prevotes + case tmproto.PrecommitType: + return ps.PRS.Precommits + } + } + if ps.PRS.CatchupCommitRound == round { + switch votesType { + case tmproto.PrevoteType: + return nil + case tmproto.PrecommitType: + return ps.PRS.CatchupCommit + } + } + if ps.PRS.ProposalPOLRound == round { + switch votesType { + case tmproto.PrevoteType: + return ps.PRS.ProposalPOL + case tmproto.PrecommitType: + return nil + } + } + return nil + } + if ps.PRS.Height == height+1 { + if ps.PRS.LastCommitRound == round { + switch votesType { + case tmproto.PrevoteType: + return nil + case tmproto.PrecommitType: + return ps.PRS.LastCommit + } + } + return nil + } + return nil +} + +// 'round': A round for which we have a +2/3 commit. +func (ps *PeerState) ensureCatchupCommitRound(height int64, round int32, numValidators int) { + if ps.PRS.Height != height { + return + } + /* + NOTE: This is wrong, 'round' could change. + e.g. 
if orig round is not the same as block LastCommit round. + if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round { + panic(fmt.Sprintf( + "Conflicting CatchupCommitRound. Height: %v, + Orig: %v, + New: %v", + height, + ps.CatchupCommitRound, + round)) + } + */ + if ps.PRS.CatchupCommitRound == round { + return // Nothing to do! + } + ps.PRS.CatchupCommitRound = round + if round == ps.PRS.Round { + ps.PRS.CatchupCommit = ps.PRS.Precommits + } else { + ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + } +} + +// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking +// what votes this peer has received. +// NOTE: It's important to make sure that numValidators actually matches +// what the node sees as the number of validators for height. +func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + ps.ensureVoteBitArrays(height, numValidators) +} + +func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { + if ps.PRS.Height == height { + if ps.PRS.Prevotes == nil { + ps.PRS.Prevotes = bits.NewBitArray(numValidators) + } + if ps.PRS.Precommits == nil { + ps.PRS.Precommits = bits.NewBitArray(numValidators) + } + if ps.PRS.CatchupCommit == nil { + ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + } + if ps.PRS.ProposalPOL == nil { + ps.PRS.ProposalPOL = bits.NewBitArray(numValidators) + } + } else if ps.PRS.Height == height+1 { + if ps.PRS.LastCommit == nil { + ps.PRS.LastCommit = bits.NewBitArray(numValidators) + } + } +} + +// RecordVote increments internal votes related statistics for this peer. +// It returns the total number of added votes. +func (ps *PeerState) RecordVote() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.Stats.Votes++ + + return ps.Stats.Votes +} + +// VotesSent returns the number of blocks for which peer has been sending us +// votes. 
+func (ps *PeerState) VotesSent() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return ps.Stats.Votes +} + +// RecordBlockPart increments internal block part related statistics for this peer. +// It returns the total number of added block parts. +func (ps *PeerState) RecordBlockPart() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.Stats.BlockParts++ + return ps.Stats.BlockParts +} + +// BlockPartsSent returns the number of useful block parts the peer has sent us. +func (ps *PeerState) BlockPartsSent() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return ps.Stats.BlockParts +} + +// SetHasVote sets the given vote as known by the peer +func (ps *PeerState) SetHasVote(vote *types.Vote) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) +} + +func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) { + logger := ps.logger.With( + "peerH/R", + fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), + "H/R", + fmt.Sprintf("%d/%d", height, round)) + logger.Debug("setHasVote", "type", voteType, "index", index) + + // NOTE: some may be nil BitArrays -> no side effects. + psVotes := ps.getVoteBitArray(height, round, voteType) + if psVotes != nil { + psVotes.SetIndex(int(index), true) + } +} + +// ApplyNewRoundStepMessage updates the peer state for the new round. +func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + // Ignore duplicates or decreases + if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 { + return + } + + // Just remember these values. 
+ psHeight := ps.PRS.Height + psRound := ps.PRS.Round + psCatchupCommitRound := ps.PRS.CatchupCommitRound + psCatchupCommit := ps.PRS.CatchupCommit + + startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second) + ps.PRS.Height = msg.Height + ps.PRS.Round = msg.Round + ps.PRS.Step = msg.Step + ps.PRS.StartTime = startTime + if psHeight != msg.Height || psRound != msg.Round { + ps.PRS.Proposal = false + ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{} + ps.PRS.ProposalBlockParts = nil + ps.PRS.ProposalPOLRound = -1 + ps.PRS.ProposalPOL = nil + // We'll update the BitArray capacity later. + ps.PRS.Prevotes = nil + ps.PRS.Precommits = nil + } + if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound { + // Peer caught up to CatchupCommitRound. + // Preserve psCatchupCommit! + // NOTE: We prefer to use prs.Precommits if + // pr.Round matches pr.CatchupCommitRound. + ps.PRS.Precommits = psCatchupCommit + } + if psHeight != msg.Height { + // Shift Precommits to LastCommit. + if psHeight+1 == msg.Height && psRound == msg.LastCommitRound { + ps.PRS.LastCommitRound = msg.LastCommitRound + ps.PRS.LastCommit = ps.PRS.Precommits + } else { + ps.PRS.LastCommitRound = msg.LastCommitRound + ps.PRS.LastCommit = nil + } + // We'll update the BitArray capacity later. + ps.PRS.CatchupCommitRound = -1 + ps.PRS.CatchupCommit = nil + } +} + +// ApplyNewValidBlockMessage updates the peer state for the new valid block. +func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + + if ps.PRS.Round != msg.Round && !msg.IsCommit { + return + } + + ps.PRS.ProposalBlockPartSetHeader = msg.BlockPartSetHeader + ps.PRS.ProposalBlockParts = msg.BlockParts +} + +// ApplyProposalPOLMessage updates the peer state for the new proposal POL. 
+func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound { + return + } + + // TODO: Merge onto existing ps.PRS.ProposalPOL? + // We might have sent some prevotes in the meantime. + ps.PRS.ProposalPOL = msg.ProposalPOL +} + +// ApplyHasVoteMessage updates the peer state for the new vote. +func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + + ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) +} + +// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes +// it claims to have for the corresponding BlockID. +// `ourVotes` is a BitArray of votes we have for msg.BlockID +// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height), +// we conservatively overwrite ps's votes w/ msg.Votes. +func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *bits.BitArray) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type) + if votes != nil { + if ourVotes == nil { + votes.Update(msg.Votes) + } else { + otherVotes := votes.Sub(ourVotes) + hasVotes := otherVotes.Or(msg.Votes) + votes.Update(hasVotes) + } + } +} + +// String returns a string representation of the PeerState +func (ps *PeerState) String() string { + return ps.StringIndented("") +} + +// StringIndented returns a string representation of the PeerState +func (ps *PeerState) StringIndented(indent string) string { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return fmt.Sprintf(`PeerState{ +%s Key %v +%s RoundState %v +%s Stats %v +%s}`, + indent, ps.peer.ID(), + indent, ps.PRS.StringIndented(indent+" "), + indent, ps.Stats, + indent) +} + +//----------------------------------------------------------------------------- +// Messages + +// Message is a message that can be 
sent and received on the Reactor +type Message interface { + ValidateBasic() error +} + +// func init() { +// tmjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage") +// tmjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage") +// tmjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal") +// tmjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL") +// tmjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart") +// tmjson.RegisterType(&VoteMessage{}, "tendermint/Vote") +// tmjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote") +// tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23") +// tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") +// } + +func decodeMsg(bz []byte) (msg Message, err error) { + pb := &tmcons.Message{} + if err = proto.Unmarshal(bz, pb); err != nil { + return msg, err + } + + return MsgFromProto(pb) +} + +//------------------------------------- + +// NewRoundStepMessage is sent for every step taken in the ConsensusState. +// For every height/round/step transition +type NewRoundStepMessage struct { + Height int64 + Round int32 + Step cstypes.RoundStepType + SecondsSinceStartTime int64 + LastCommitRound int32 +} + +// ValidateBasic performs basic validation. +func (m *NewRoundStepMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if !m.Step.IsValid() { + return errors.New("invalid Step") + } + + // NOTE: SecondsSinceStartTime may be negative + + // LastCommitRound will be -1 for the initial height, but we don't know what height this is + // since it can be specified in genesis. The reactor will have to validate this via + // ValidateHeight(). + if m.LastCommitRound < -1 { + return errors.New("invalid LastCommitRound (cannot be < -1)") + } + + return nil +} + +// ValidateHeight validates the height given the chain's initial height. 
+func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error { + if m.Height < initialHeight { + return fmt.Errorf("invalid Height %v (lower than initial height %v)", + m.Height, initialHeight) + } + if m.Height == initialHeight && m.LastCommitRound != -1 { + return fmt.Errorf("invalid LastCommitRound %v (must be -1 for initial height %v)", + m.LastCommitRound, initialHeight) + } + if m.Height > initialHeight && m.LastCommitRound < 0 { + return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint + initialHeight) + } + return nil +} + +// String returns a string representation. +func (m *NewRoundStepMessage) String() string { + return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]", + m.Height, m.Round, m.Step, m.LastCommitRound) +} + +//------------------------------------- + +// NewValidBlockMessage is sent when a validator observes a valid block B in some round r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// In case the block is also committed, then IsCommit flag is set to true. +type NewValidBlockMessage struct { + Height int64 + Round int32 + BlockPartSetHeader types.PartSetHeader + BlockParts *bits.BitArray + IsCommit bool +} + +// ValidateBasic performs basic validation. 
+func (m *NewValidBlockMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if err := m.BlockPartSetHeader.ValidateBasic(); err != nil { + return fmt.Errorf("wrong BlockPartSetHeader: %v", err) + } + if m.BlockParts.Size() == 0 { + return errors.New("empty blockParts") + } + if m.BlockParts.Size() != int(m.BlockPartSetHeader.Total) { + return fmt.Errorf("blockParts bit array size %d not equal to BlockPartSetHeader.Total %d", + m.BlockParts.Size(), + m.BlockPartSetHeader.Total) + } + if m.BlockParts.Size() > int(types.MaxBlockPartsCount) { + return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount) + } + return nil +} + +// String returns a string representation. +func (m *NewValidBlockMessage) String() string { + return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]", + m.Height, m.Round, m.BlockPartSetHeader, m.BlockParts, m.IsCommit) +} + +//------------------------------------- + +// ProposalMessage is sent when a new block is proposed. +type ProposalMessage struct { + Proposal *types.Proposal +} + +// ValidateBasic performs basic validation. +func (m *ProposalMessage) ValidateBasic() error { + return m.Proposal.ValidateBasic() +} + +// String returns a string representation. +func (m *ProposalMessage) String() string { + return fmt.Sprintf("[Proposal %v]", m.Proposal) +} + +//------------------------------------- + +// ProposalPOLMessage is sent when a previous proposal is re-proposed. +type ProposalPOLMessage struct { + Height int64 + ProposalPOLRound int32 + ProposalPOL *bits.BitArray +} + +// ValidateBasic performs basic validation. 
+func (m *ProposalPOLMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.ProposalPOLRound < 0 { + return errors.New("negative ProposalPOLRound") + } + if m.ProposalPOL.Size() == 0 { + return errors.New("empty ProposalPOL bit array") + } + if m.ProposalPOL.Size() > types.MaxVotesCount { + return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount) + } + return nil +} + +// String returns a string representation. +func (m *ProposalPOLMessage) String() string { + return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL) +} + +//------------------------------------- + +// BlockPartMessage is sent when gossipping a piece of the proposed block. +type BlockPartMessage struct { + Height int64 + Round int32 + Part *types.Part +} + +// ValidateBasic performs basic validation. +func (m *BlockPartMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if err := m.Part.ValidateBasic(); err != nil { + return fmt.Errorf("wrong Part: %v", err) + } + return nil +} + +// String returns a string representation. +func (m *BlockPartMessage) String() string { + return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part) +} + +//------------------------------------- + +// VoteMessage is sent when voting for a proposal (or lack thereof). +type VoteMessage struct { + Vote *types.Vote +} + +// ValidateBasic performs basic validation. +func (m *VoteMessage) ValidateBasic() error { + return m.Vote.ValidateBasic() +} + +// String returns a string representation. +func (m *VoteMessage) String() string { + return fmt.Sprintf("[Vote %v]", m.Vote) +} + +//------------------------------------- + +// HasVoteMessage is sent to indicate that a particular vote has been received. 
+type HasVoteMessage struct { + Height int64 + Round int32 + Type tmproto.SignedMsgType + Index int32 +} + +// ValidateBasic performs basic validation. +func (m *HasVoteMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("invalid Type") + } + if m.Index < 0 { + return errors.New("negative Index") + } + return nil +} + +// String returns a string representation. +func (m *HasVoteMessage) String() string { + return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type) +} + +//------------------------------------- + +// VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. +type VoteSetMaj23Message struct { + Height int64 + Round int32 + Type tmproto.SignedMsgType + BlockID types.BlockID +} + +// ValidateBasic performs basic validation. +func (m *VoteSetMaj23Message) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("invalid Type") + } + if err := m.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("wrong BlockID: %v", err) + } + return nil +} + +// String returns a string representation. +func (m *VoteSetMaj23Message) String() string { + return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID) +} + +//------------------------------------- + +// VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID. +type VoteSetBitsMessage struct { + Height int64 + Round int32 + Type tmproto.SignedMsgType + BlockID types.BlockID + Votes *bits.BitArray +} + +// ValidateBasic performs basic validation. 
+func (m *VoteSetBitsMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("invalid Type") + } + if err := m.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("wrong BlockID: %v", err) + } + // NOTE: Votes.Size() can be zero if the node does not have any + if m.Votes.Size() > types.MaxVotesCount { + return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount) + } + return nil +} + +// String returns a string representation. +func (m *VoteSetBitsMessage) String() string { + return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes) +} + +//------------------------------------- diff --git a/test/maverick/consensus/replay.go b/test/maverick/consensus/replay.go new file mode 100644 index 0000000000..beb0d70039 --- /dev/null +++ b/test/maverick/consensus/replay.go @@ -0,0 +1,534 @@ +package consensus + +import ( + "bytes" + "context" + "fmt" + "hash/crc32" + "io" + "reflect" + "time" + + abci "github.com/lazyledger/lazyledger-core/abci/types" + "github.com/lazyledger/lazyledger-core/crypto/merkle" + "github.com/lazyledger/lazyledger-core/libs/log" + "github.com/lazyledger/lazyledger-core/proxy" + sm "github.com/lazyledger/lazyledger-core/state" + "github.com/lazyledger/lazyledger-core/types" +) + +var crc32c = crc32.MakeTable(crc32.Castagnoli) + +// Functionality to replay blocks and messages on recovery from a crash. +// There are two general failure scenarios: +// +// 1. failure during consensus +// 2. failure while applying the block +// +// The former is handled by the WAL, the latter by the proxyApp Handshake on +// restart, which ultimately hands off the work to the WAL. + +//----------------------------------------- +// 1. 
Recover from failure during consensus +// (by replaying messages from the WAL) +//----------------------------------------- + +// Unmarshal and apply a single message to the consensus state as if it were +// received in receiveRoutine. Lines that start with "#" are ignored. +// NOTE: receiveRoutine should not be running. +func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error { + // Skip meta messages which exist for demarcating boundaries. + if _, ok := msg.Msg.(EndHeightMessage); ok { + return nil + } + + // for logging + switch m := msg.Msg.(type) { + case types.EventDataRoundState: + cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) + // these are playback checks + ticker := time.After(time.Second * 2) + if newStepSub != nil { + select { + case stepMsg := <-newStepSub.Out(): + m2 := stepMsg.Data().(types.EventDataRoundState) + if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { + return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) + } + case <-newStepSub.Cancelled(): + return fmt.Errorf("failed to read off newStepSub.Out(). 
newStepSub was cancelled") + case <-ticker: + return fmt.Errorf("failed to read off newStepSub.Out()") + } + } + case msgInfo: + peerID := m.PeerID + if peerID == "" { + peerID = "local" + } + switch msg := m.Msg.(type) { + case *ProposalMessage: + p := msg.Proposal + cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", + p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID) + case *BlockPartMessage: + cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) + case *VoteMessage: + v := msg.Vote + cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, + "blockID", v.BlockID, "peer", peerID) + } + + cs.handleMsg(m) + case timeoutInfo: + cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration) + cs.handleTimeout(m, cs.RoundState) + default: + return fmt.Errorf("replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg)) + } + return nil +} + +// Replay only those messages since the last block. `timeoutRoutine` should +// run concurrently to read off tickChan. +func (cs *State) catchupReplay(csHeight int64) error { + + // Set replayMode to true so we don't log signing errors. + cs.replayMode = true + defer func() { cs.replayMode = false }() + + // Ensure that #ENDHEIGHT for this height doesn't exist. + // NOTE: This is just a sanity check. As far as we know things work fine + // without it, and Handshake could reuse State if it weren't for + // this check (since we can crash after writing #ENDHEIGHT). + // + // Ignore data corruption errors since this is a sanity check. + gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) + if err != nil { + return err + } + if gr != nil { + if err := gr.Close(); err != nil { + return err + } + } + if found { + return fmt.Errorf("wal should not contain #ENDHEIGHT %d", csHeight) + } + + // Search for last height marker. 
+ // + // Ignore data corruption errors in previous heights because we only care about last height + if csHeight < cs.state.InitialHeight { + return fmt.Errorf("cannot replay height %v, below initial height %v", csHeight, cs.state.InitialHeight) + } + endHeight := csHeight - 1 + if csHeight == cs.state.InitialHeight { + endHeight = 0 + } + gr, found, err = cs.wal.SearchForEndHeight(endHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) + if err == io.EOF { + cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) + } else if err != nil { + return err + } + if !found { + return fmt.Errorf("cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, endHeight) + } + defer gr.Close() + + cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) + + var msg *TimedWALMessage + dec := WALDecoder{gr} + +LOOP: + for { + msg, err = dec.Decode() + switch { + case err == io.EOF: + break LOOP + case IsDataCorruptionError(err): + cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) + return err + case err != nil: + return err + } + + // NOTE: since the priv key is set when the msgs are received + // it will attempt to eg double sign but we can just ignore it + // since the votes will be replayed and we'll get to the next step + if err := cs.readReplayMessage(msg, nil); err != nil { + return err + } + } + cs.Logger.Info("Replay: Done") + return nil +} + +//-------------------------------------------------------------------------------- + +// Parses marker lines of the form: +// #ENDHEIGHT: 12345 +/* +func makeHeightSearchFunc(height int64) auto.SearchFunc { + return func(line string) (int, error) { + line = strings.TrimRight(line, "\n") + parts := strings.Split(line, " ") + if len(parts) != 2 { + return -1, errors.New("line did not have 2 parts") + } + i, err := strconv.Atoi(parts[1]) + if err != nil { + return -1, errors.New("failed to parse INFO: " + 
err.Error()) + } + if height < i { + return 1, nil + } else if height == i { + return 0, nil + } else { + return -1, nil + } + } +}*/ + +//--------------------------------------------------- +// 2. Recover from failure while applying the block. +// (by handshaking with the app to figure out where +// we were last, and using the WAL to recover there.) +//--------------------------------------------------- + +type Handshaker struct { + stateStore sm.Store + initialState sm.State + store sm.BlockStore + eventBus types.BlockEventPublisher + genDoc *types.GenesisDoc + logger log.Logger + + nBlocks int // number of blocks applied to the state +} + +func NewHandshaker(stateStore sm.Store, state sm.State, + store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { + + return &Handshaker{ + stateStore: stateStore, + initialState: state, + store: store, + eventBus: types.NopEventBus{}, + genDoc: genDoc, + logger: log.NewNopLogger(), + nBlocks: 0, + } +} + +func (h *Handshaker) SetLogger(l log.Logger) { + h.logger = l +} + +// SetEventBus - sets the event bus for publishing block related events. +// If not called, it defaults to types.NopEventBus. +func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { + h.eventBus = eventBus +} + +// NBlocks returns the number of blocks applied to the state. +func (h *Handshaker) NBlocks() int { + return h.nBlocks +} + +// TODO: retry the handshake/replay if it fails ? +func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { + + // Handshake is done via ABCI Info on the query conn. 
+ res, err := proxyApp.Query().InfoSync(context.Background(), proxy.RequestInfo) + if err != nil { + return fmt.Errorf("error calling Info: %v", err) + } + + blockHeight := res.LastBlockHeight + if blockHeight < 0 { + return fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) + } + appHash := res.LastBlockAppHash + + h.logger.Info("ABCI Handshake App Info", + "height", blockHeight, + "hash", fmt.Sprintf("%X", appHash), + "software-version", res.Version, + "protocol-version", res.AppVersion, + ) + + // Only set the version if there is no existing state. + if h.initialState.LastBlockHeight == 0 { + h.initialState.Version.Consensus.App = res.AppVersion + } + + // Replay blocks up to the latest in the blockstore. + _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + if err != nil { + return fmt.Errorf("error on replay: %v", err) + } + + h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", + "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) + + // TODO: (on restart) replay mempool + + return nil +} + +// ReplayBlocks replays all blocks since appBlockHeight and ensures the result +// matches the current state. +// Returns the final AppHash or an error. +func (h *Handshaker) ReplayBlocks( + state sm.State, + appHash []byte, + appBlockHeight int64, + proxyApp proxy.AppConns, +) ([]byte, error) { + storeBlockBase := h.store.Base() + storeBlockHeight := h.store.Height() + stateBlockHeight := state.LastBlockHeight + h.logger.Info( + "ABCI Replay Blocks", + "appHeight", + appBlockHeight, + "storeHeight", + storeBlockHeight, + "stateHeight", + stateBlockHeight) + + // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain. 
+ if appBlockHeight == 0 { + validators := make([]*types.Validator, len(h.genDoc.Validators)) + for i, val := range h.genDoc.Validators { + validators[i] = types.NewValidator(val.PubKey, val.Power) + } + validatorSet := types.NewValidatorSet(validators) + nextVals := types.TM2PB.ValidatorUpdates(validatorSet) + csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams) + req := abci.RequestInitChain{ + Time: h.genDoc.GenesisTime, + ChainId: h.genDoc.ChainID, + InitialHeight: h.genDoc.InitialHeight, + ConsensusParams: csParams, + Validators: nextVals, + AppStateBytes: h.genDoc.AppState, + } + res, err := proxyApp.Consensus().InitChainSync(context.Background(), req) + if err != nil { + return nil, err + } + + appHash = res.AppHash + + if stateBlockHeight == 0 { // we only update state when we are in initial state + // If the app did not return an app hash, we keep the one set from the genesis doc in + // the state. We don't set appHash since we don't want the genesis doc app hash + // recorded in the genesis block. We should probably just remove GenesisDoc.AppHash. + if len(res.AppHash) > 0 { + state.AppHash = res.AppHash + } + // If the app returned validators or consensus params, update the state. + if len(res.Validators) > 0 { + vals, err := types.PB2TM.ValidatorUpdates(res.Validators) + if err != nil { + return nil, err + } + state.Validators = types.NewValidatorSet(vals) + state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1) + } else if len(h.genDoc.Validators) == 0 { + // If validator set is not set in genesis and still empty after InitChain, exit. 
+ return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") + } + + if res.ConsensusParams != nil { + state.ConsensusParams = types.UpdateConsensusParams(state.ConsensusParams, res.ConsensusParams) + state.Version.Consensus.App = state.ConsensusParams.Version.AppVersion + } + // We update the last results hash with the empty hash, to conform with RFC-6962. + state.LastResultsHash = merkle.HashFromByteSlices(nil) + if err := h.stateStore.Save(state); err != nil { + return nil, err + } + } + } + + // First handle edge cases and constraints on the storeBlockHeight and storeBlockBase. + switch { + case storeBlockHeight == 0: + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil + + case appBlockHeight == 0 && state.InitialHeight < storeBlockBase: + // the app has no state, and the block store is truncated above the initial height + return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} + + case appBlockHeight > 0 && appBlockHeight < storeBlockBase-1: + // the app is too far behind truncated store (can be 1 behind since we replay the next) + return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} + + case storeBlockHeight < appBlockHeight: + // the app should never be ahead of the store (but this is under app's control) + return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight} + + case storeBlockHeight < stateBlockHeight: + // the state should never be ahead of the store (this is under tendermint's control) + panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) + + case storeBlockHeight > stateBlockHeight+1: + // store should be at most one ahead of the state (this is under tendermint's control) + panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) + } + + var err error + // Now either 
store is equal to state, or one ahead. + // For each, consider all cases of where the app could be, given app <= store + if storeBlockHeight == stateBlockHeight { + // Tendermint ran Commit and saved the state. + // Either the app is asking for replay, or we're all synced up. + if appBlockHeight < storeBlockHeight { + // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) + return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + + } else if appBlockHeight == storeBlockHeight { + // We're good! + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil + } + + } else if storeBlockHeight == stateBlockHeight+1 { + // We saved the block in the store but haven't updated the state, + // so we'll need to replay a block using the WAL. + switch { + case appBlockHeight < stateBlockHeight: + // the app is further behind than it should be, so replay blocks + // but leave the last block to go through the WAL + return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + + case appBlockHeight == stateBlockHeight: + // We haven't run Commit (both the state and app are one block behind), + // so replayBlock with the real app. + // NOTE: We could instead use the cs.WAL on cs.Start, + // but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT + h.logger.Info("Replay last block using real app") + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + return state.AppHash, err + + case appBlockHeight == storeBlockHeight: + // We ran Commit, but didn't save the state, so replayBlock with mock app. 
+ abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) + if err != nil { + return nil, err + } + mockApp := newMockProxyApp(appHash, abciResponses) + h.logger.Info("Replay last block using mock app") + state, err = h.replayBlock(state, storeBlockHeight, mockApp) + return state.AppHash, err + } + + } + + panic(fmt.Sprintf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d", + appBlockHeight, storeBlockHeight, stateBlockHeight)) +} + +func (h *Handshaker) replayBlocks( + state sm.State, + proxyApp proxy.AppConns, + appBlockHeight, + storeBlockHeight int64, + mutateState bool) ([]byte, error) { + // App is further behind than it should be, so we need to replay blocks. + // We replay all blocks from appBlockHeight+1. + // + // Note that we don't have an old version of the state, + // so we by-pass state validation/mutation using sm.ExecCommitBlock. + // This also means we won't be saving validator sets if they change during this period. + // TODO: Load the historical information to fix this and just use state.ApplyBlock + // + // If mutateState == true, the final block is replayed with h.replayBlock() + + var appHash []byte + var err error + finalBlock := storeBlockHeight + if mutateState { + finalBlock-- + } + firstBlock := appBlockHeight + 1 + if firstBlock == 1 { + firstBlock = state.InitialHeight + } + for i := firstBlock; i <= finalBlock; i++ { + h.logger.Info("Applying block", "height", i) + block := h.store.LoadBlock(i) + // Extra check to ensure the app was not changed in a way it shouldn't have. 
+ if len(appHash) > 0 { + assertAppHashEqualsOneFromBlock(appHash, block) + } + + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight) + if err != nil { + return nil, err + } + + h.nBlocks++ + } + + if mutateState { + // sync the final block + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + if err != nil { + return nil, err + } + appHash = state.AppHash + } + + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil +} + +// ApplyBlock on the proxyApp with the last block. +func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { + block := h.store.LoadBlock(height) + meta := h.store.LoadBlockMeta(height) + + // Use stubs for both mempool and evidence pool since no transactions nor + // evidence are needed here - block already exists. + blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}) + blockExec.SetEventBus(h.eventBus) + + var err error + state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block) + if err != nil { + return sm.State{}, err + } + + h.nBlocks++ + + return state, nil +} + +func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) { + if !bytes.Equal(appHash, block.AppHash) { + panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X. + +Block: %v +`, + appHash, block.AppHash, block)) + } +} + +func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) { + if !bytes.Equal(appHash, state.AppHash) { + panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got +%X, expected %X. 
+ +State: %v + +Did you reset Tendermint without resetting your application's data?`, + appHash, state.AppHash, state)) + } +} diff --git a/test/maverick/consensus/replay_file.go b/test/maverick/consensus/replay_file.go new file mode 100644 index 0000000000..bf96abab16 --- /dev/null +++ b/test/maverick/consensus/replay_file.go @@ -0,0 +1,338 @@ +package consensus + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + + dbm "github.com/tendermint/tm-db" + + cfg "github.com/lazyledger/lazyledger-core/config" + "github.com/lazyledger/lazyledger-core/libs/log" + tmos "github.com/lazyledger/lazyledger-core/libs/os" + "github.com/lazyledger/lazyledger-core/proxy" + sm "github.com/lazyledger/lazyledger-core/state" + "github.com/lazyledger/lazyledger-core/store" + "github.com/lazyledger/lazyledger-core/types" +) + +const ( + // event bus subscriber + subscriber = "replay-file" +) + +//-------------------------------------------------------- +// replay messages interactively or all at once + +// replay the wal file +func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) { + consensusState := newConsensusStateForReplay(config, csConfig) + + if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { + tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) + } +} + +// Replay msgs in file or start the console +func (cs *State) ReplayFile(file string, console bool) error { + + if cs.IsRunning() { + return errors.New("cs is already running, cannot replay") + } + if cs.wal != nil { + return errors.New("cs wal is open, cannot replay") + } + + cs.startForReplay() + + // ensure all new step events are regenerated as expected + + ctx := context.Background() + newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + if err != nil { + return fmt.Errorf("failed to subscribe %s to %v", 
subscriber, types.EventQueryNewRoundStep) + } + defer func() { + if err := cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil { + cs.Logger.Error("Error unsubscribing to event bus", "err", err) + } + }() + + // just open the file for reading, no need to use wal + fp, err := os.OpenFile(file, os.O_RDONLY, 0600) + if err != nil { + return err + } + + pb := newPlayback(file, fp, cs, cs.state.Copy()) + defer pb.fp.Close() + + var nextN int // apply N msgs in a row + var msg *TimedWALMessage + for { + if nextN == 0 && console { + nextN = pb.replayConsoleLoop() + } + + msg, err = pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + return err + } + + if nextN > 0 { + nextN-- + } + pb.count++ + } +} + +//------------------------------------------------ +// playback manager + +type playback struct { + cs *State + + fp *os.File + dec *WALDecoder + count int // how many lines/msgs into the file are we + + // replays can be reset to beginning + fileName string // so we can close/reopen the file + genesisState sm.State // so the replay session knows where to restart from +} + +func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *playback { + return &playback{ + cs: cs, + fp: fp, + fileName: fileName, + genesisState: genState, + dec: NewWALDecoder(fp), + } +} + +// go back count steps by resetting the state and running (pb.count - count) steps +func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { + if err := pb.cs.Stop(); err != nil { + return err + } + pb.cs.Wait() + + newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, + pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, map[int64]Misbehavior{}) + newCS.SetEventBus(pb.cs.eventBus) + newCS.startForReplay() + + if err := pb.fp.Close(); err != nil { + return err + } + fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 
0600) + if err != nil { + return err + } + pb.fp = fp + pb.dec = NewWALDecoder(fp) + count = pb.count - count + fmt.Printf("Reseting from %d to %d\n", pb.count, count) + pb.count = 0 + pb.cs = newCS + var msg *TimedWALMessage + for i := 0; i < count; i++ { + msg, err = pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + return err + } + pb.count++ + } + return nil +} + +func (cs *State) startForReplay() { + cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests") + /* TODO:! + // since we replay tocks we just ignore ticks + go func() { + for { + select { + case <-cs.tickChan: + case <-cs.Quit: + return + } + } + }()*/ +} + +// console function for parsing input and running commands +func (pb *playback) replayConsoleLoop() int { + for { + fmt.Printf("> ") + bufReader := bufio.NewReader(os.Stdin) + line, more, err := bufReader.ReadLine() + if more { + tmos.Exit("input is too long") + } else if err != nil { + tmos.Exit(err.Error()) + } + + tokens := strings.Split(string(line), " ") + if len(tokens) == 0 { + continue + } + + switch tokens[0] { + case "next": + // "next" -> replay next message + // "next N" -> replay next N messages + + if len(tokens) == 1 { + return 0 + } + i, err := strconv.Atoi(tokens[1]) + if err != nil { + fmt.Println("next takes an integer argument") + } else { + return i + } + + case "back": + // "back" -> go back one message + // "back N" -> go back N messages + + // NOTE: "back" is not supported in the state machine design, + // so we restart and replay up to + + ctx := context.Background() + // ensure all new step events are regenerated as expected + + newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + if err != nil { + tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) + } + defer func() { + if err := 
pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil { + pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err) + } + }() + + if len(tokens) == 1 { + if err := pb.replayReset(1, newStepSub); err != nil { + pb.cs.Logger.Error("Replay reset error", "err", err) + } + } else { + i, err := strconv.Atoi(tokens[1]) + if err != nil { + fmt.Println("back takes an integer argument") + } else if i > pb.count { + fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) + } else if err := pb.replayReset(i, newStepSub); err != nil { + pb.cs.Logger.Error("Replay reset error", "err", err) + } + } + + case "rs": + // "rs" -> print entire round state + // "rs short" -> print height/round/step + // "rs " -> print another field of the round state + + rs := pb.cs.RoundState + if len(tokens) == 1 { + fmt.Println(rs) + } else { + switch tokens[1] { + case "short": + fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step) + case "validators": + fmt.Println(rs.Validators) + case "proposal": + fmt.Println(rs.Proposal) + case "proposal_block": + fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort()) + case "locked_round": + fmt.Println(rs.LockedRound) + case "locked_block": + fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort()) + case "votes": + fmt.Println(rs.Votes.StringIndented(" ")) + + default: + fmt.Println("Unknown option", tokens[1]) + } + } + case "n": + fmt.Println(pb.count) + } + } +} + +//-------------------------------------------------------------------------------- + +// convenience for replay mode +func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State { + dbType := dbm.BackendType(config.DBBackend) + // Get BlockStore + blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + if err != nil { + tmos.Exit(err.Error()) + } + blockStore := store.NewBlockStore(blockStoreDB) + + // 
Get State + stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + if err != nil { + tmos.Exit(err.Error()) + } + stateStore := sm.NewStore(stateDB) + gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) + if err != nil { + tmos.Exit(err.Error()) + } + state, err := sm.MakeGenesisState(gdoc) + if err != nil { + tmos.Exit(err.Error()) + } + + // Create proxyAppConn connection (consensus, mempool, query) + clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) + proxyApp := proxy.NewAppConns(clientCreator) + err = proxyApp.Start() + if err != nil { + tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) + } + + eventBus := types.NewEventBus() + if err := eventBus.Start(); err != nil { + tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) + } + + handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) + handshaker.SetEventBus(eventBus) + err = handshaker.Handshake(proxyApp) + if err != nil { + tmos.Exit(fmt.Sprintf("Error on handshake: %v", err)) + } + + mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + + consensusState := NewState(csConfig, state.Copy(), blockExec, + blockStore, mempool, evpool, map[int64]Misbehavior{}) + + consensusState.SetEventBus(eventBus) + return consensusState +} diff --git a/test/maverick/consensus/replay_stubs.go b/test/maverick/consensus/replay_stubs.go new file mode 100644 index 0000000000..946f3e383a --- /dev/null +++ b/test/maverick/consensus/replay_stubs.go @@ -0,0 +1,90 @@ +package consensus + +import ( + abci "github.com/lazyledger/lazyledger-core/abci/types" + "github.com/lazyledger/lazyledger-core/libs/clist" + mempl "github.com/lazyledger/lazyledger-core/mempool" + tmstate "github.com/lazyledger/lazyledger-core/proto/tendermint/state" + "github.com/lazyledger/lazyledger-core/proxy" + 
"github.com/lazyledger/lazyledger-core/types" +) + +//----------------------------------------------------------------------------- + +type emptyMempool struct{} + +var _ mempl.Mempool = emptyMempool{} + +func (emptyMempool) Lock() {} +func (emptyMempool) Unlock() {} +func (emptyMempool) Size() int { return 0 } +func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { + return nil +} +func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (emptyMempool) Update( + _ int64, + _ types.Txs, + _ []*abci.ResponseDeliverTx, + _ mempl.PreCheckFunc, + _ mempl.PostCheckFunc, +) error { + return nil +} +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn() error { return nil } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) TxsBytes() int64 { return 0 } + +func (emptyMempool) TxsFront() *clist.CElement { return nil } +func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } + +func (emptyMempool) InitWAL() error { return nil } +func (emptyMempool) CloseWAL() {} + +//----------------------------------------------------------------------------- +// mockProxyApp uses ABCIResponses to give the right results. +// +// Useful because we don't want to call Commit() twice for the same block on +// the real app. 
+ +func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { + clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ + appHash: appHash, + abciResponses: abciResponses, + }) + cli, _ := clientCreator.NewABCIClient() + err := cli.Start() + if err != nil { + panic(err) + } + return proxy.NewAppConnConsensus(cli) +} + +type mockProxyApp struct { + abci.BaseApplication + + appHash []byte + txCount int + abciResponses *tmstate.ABCIResponses +} + +func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + r := mock.abciResponses.DeliverTxs[mock.txCount] + mock.txCount++ + if r == nil { + return abci.ResponseDeliverTx{} + } + return *r +} + +func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + mock.txCount = 0 + return *mock.abciResponses.EndBlock +} + +func (mock *mockProxyApp) Commit() abci.ResponseCommit { + return abci.ResponseCommit{Data: mock.appHash} +} diff --git a/test/maverick/consensus/state.go b/test/maverick/consensus/state.go new file mode 100644 index 0000000000..2ce110cbee --- /dev/null +++ b/test/maverick/consensus/state.go @@ -0,0 +1,1976 @@ +package consensus + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "reflect" + "runtime/debug" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + + cfg "github.com/lazyledger/lazyledger-core/config" + cstypes "github.com/lazyledger/lazyledger-core/consensus/types" + "github.com/lazyledger/lazyledger-core/crypto" + tmevents "github.com/lazyledger/lazyledger-core/libs/events" + "github.com/lazyledger/lazyledger-core/libs/fail" + tmjson "github.com/lazyledger/lazyledger-core/libs/json" + "github.com/lazyledger/lazyledger-core/libs/log" + tmmath "github.com/lazyledger/lazyledger-core/libs/math" + tmos "github.com/lazyledger/lazyledger-core/libs/os" + "github.com/lazyledger/lazyledger-core/libs/service" 
+ "github.com/lazyledger/lazyledger-core/p2p" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" + sm "github.com/lazyledger/lazyledger-core/state" + "github.com/lazyledger/lazyledger-core/types" + tmtime "github.com/lazyledger/lazyledger-core/types/time" +) + +// State handles execution of the consensus algorithm. +// It processes votes and proposals, and upon reaching agreement, +// commits blocks to the chain and executes them against the application. +// The internal state machine receives input from peers, the internal validator, and from a timer. +type State struct { + service.BaseService + + // config details + config *cfg.ConsensusConfig + privValidator types.PrivValidator // for signing votes + + // store blocks and commits + blockStore sm.BlockStore + + // create and execute blocks + blockExec *sm.BlockExecutor + + // notify us if txs are available + txNotifier txNotifier + + // add evidence to the pool + // when it's detected + evpool evidencePool + + // internal state + mtx sync.RWMutex + cstypes.RoundState + state sm.State // State until height-1. + + // state changes may be triggered by: msgs from peers, + // msgs from ourself, or by timeouts + peerMsgQueue chan msgInfo + internalMsgQueue chan msgInfo + timeoutTicker TimeoutTicker + // privValidator pubkey, memoized for the duration of one block + // to avoid extra requests to HSM + privValidatorPubKey crypto.PubKey + + // information about about added votes and block parts are written on this channel + // so statistics can be computed by reactor + statsMsgQueue chan msgInfo + + // we use eventBus to trigger msg broadcasts in the reactor, + // and to notify external subscribers, eg. 
through a websocket + eventBus *types.EventBus + + // a Write-Ahead Log ensures we can recover from any kind of crash + // and helps us avoid signing conflicting votes + wal WAL + replayMode bool // so we don't log signing errors during replay + doWALCatchup bool // determines if we even try to do the catchup + + // for tests where we want to limit the number of transitions the state makes + nSteps int + + // some functions can be overwritten for testing + decideProposal func(height int64, round int32) + + // closed when we finish shutting down + done chan struct{} + + // synchronous pubsub between consensus state and reactor. + // state only emits EventNewRoundStep and EventVote + evsw tmevents.EventSwitch + + // for reporting metrics + metrics *Metrics + + // misbehaviors mapped for each height (can't have more than one misbehavior per height) + misbehaviors map[int64]Misbehavior + + // the switch is passed to the state so that maveick misbehaviors can directly control which + // information they send to which nodes + sw *p2p.Switch +} + +// StateOption sets an optional parameter on the State. +type StateOption func(*State) + +// NewState returns a new State. 
+func NewState( + config *cfg.ConsensusConfig, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore sm.BlockStore, + txNotifier txNotifier, + evpool evidencePool, + misbehaviors map[int64]Misbehavior, + options ...StateOption, +) *State { + cs := &State{ + config: config, + blockExec: blockExec, + blockStore: blockStore, + txNotifier: txNotifier, + peerMsgQueue: make(chan msgInfo, msgQueueSize), + internalMsgQueue: make(chan msgInfo, msgQueueSize), + timeoutTicker: NewTimeoutTicker(), + statsMsgQueue: make(chan msgInfo, msgQueueSize), + done: make(chan struct{}), + doWALCatchup: true, + wal: nilWAL{}, + evpool: evpool, + evsw: tmevents.NewEventSwitch(), + metrics: NopMetrics(), + misbehaviors: misbehaviors, + } + // set function defaults (may be overwritten before calling Start) + cs.decideProposal = cs.defaultDecideProposal + + // We have no votes, so reconstruct LastCommit from SeenCommit. + if state.LastBlockHeight > 0 { + cs.reconstructLastCommit(state) + } + + cs.updateToState(state) + + // Don't call scheduleRound0 yet. + // We do that upon Start(). + + cs.BaseService = *service.NewBaseService(nil, "State", cs) + for _, option := range options { + option(cs) + } + return cs +} + +// I know this is not great but the maverick consensus state needs access to the peers +func (cs *State) SetSwitch(sw *p2p.Switch) { + cs.sw = sw +} + +// state transitions on complete-proposal, 2/3-any, 2/3-one +func (cs *State) handleMsg(mi msgInfo) { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + var ( + added bool + err error + ) + msg, peerID := mi.Msg, mi.PeerID + switch msg := msg.(type) { + case *ProposalMessage: + // will not cause transition. 
+ // once proposal is set, we can receive block parts + // err = cs.setProposal(msg.Proposal) + if b, ok := cs.misbehaviors[cs.Height]; ok { + err = b.ReceiveProposal(cs, msg.Proposal) + } else { + err = defaultReceiveProposal(cs, msg.Proposal) + } + case *BlockPartMessage: + // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit + added, err = cs.addProposalBlockPart(msg, peerID) + if added { + cs.statsMsgQueue <- mi + } + + if err != nil && msg.Round != cs.Round { + cs.Logger.Debug( + "Received block part from wrong round", + "height", + cs.Height, + "csRound", + cs.Round, + "blockRound", + msg.Round) + err = nil + } + case *VoteMessage: + // attempt to add the vote and dupeout the validator if its a duplicate signature + // if the vote gives us a 2/3-any or 2/3-one, we transition + added, err = cs.tryAddVote(msg.Vote, peerID) + if added { + cs.statsMsgQueue <- mi + } + + // if err == ErrAddingVote { + // TODO: punish peer + // We probably don't want to stop the peer here. The vote does not + // necessarily comes from a malicious peer but can be just broadcasted by + // a typical peer. + // https://github.com/tendermint/tendermint/issues/1281 + // } + + // NOTE: the vote is broadcast to peers by the reactor listening + // for vote events + + // TODO: If rs.Height == vote.Height && rs.Round < vote.Round, + // the peer is sending us CatchupCommit precommits. + // We could make note of this and help filter in broadcastHasVoteMessage(). 
+ default: + cs.Logger.Error("Unknown msg type", "type", reflect.TypeOf(msg)) + return + } + + if err != nil { + cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, + "peer", peerID, "err", err, "msg", msg) + } +} + +// Enter (CreateEmptyBlocks): from enterNewRound(height,round) +// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): +// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool +func (cs *State) enterPropose(height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { + logger.Debug(fmt.Sprintf( + "enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPropose: + cs.updateRoundStep(round, cstypes.RoundStepPropose) + cs.newStep() + + // If we have the whole proposal + POL, then goto Prevote now. + // else, we'll enterPrevote when the rest of the proposal is received (in AddProposalBlockPart), + // or else after timeoutPropose + if cs.isProposalComplete() { + cs.enterPrevote(height, cs.Round) + } + }() + + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.EnterPropose(cs, height, round) + } else { + defaultEnterPropose(cs, height, round) + } +} + +// Enter: `timeoutPropose` after entering Propose. +// Enter: proposal block and POL is ready. +// Prevote for LockedBlock if we're locked, or ProposalBlock if valid. +// Otherwise vote nil. 
+func (cs *State) enterPrevote(height int64, round int32) { + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { + cs.Logger.Debug(fmt.Sprintf( + "enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + + defer func() { + // Done enterPrevote: + cs.updateRoundStep(round, cstypes.RoundStepPrevote) + cs.newStep() + }() + + cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + // Sign and broadcast vote as necessary + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.EnterPrevote(cs, height, round) + } else { + defaultEnterPrevote(cs, height, round) + } + + // Once `addVote` hits any +2/3 prevotes, we will go to PrevoteWait + // (so we have more time to try and collect +2/3 prevotes for a single block) +} + +// Enter: `timeoutPrevote` after any +2/3 prevotes. +// Enter: `timeoutPrecommit` after any +2/3 precommits. +// Enter: +2/3 precomits for block or nil. +// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) +// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, +// else, precommit nil otherwise. +func (cs *State) enterPrecommit(height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { + logger.Debug(fmt.Sprintf( + "enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + + logger.Info(fmt.Sprintf("enterPrecommit(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPrecommit: + cs.updateRoundStep(round, cstypes.RoundStepPrecommit) + cs.newStep() + }() + + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.EnterPrecommit(cs, height, round) + } else { + defaultEnterPrecommit(cs, height, round) + } + +} + +func (cs *State) addVote( + vote *types.Vote, + peerID p2p.ID) (added bool, err error) { + cs.Logger.Debug( + "addVote", + "voteHeight", + vote.Height, + "voteType", + vote.Type, + "valIndex", + vote.ValidatorIndex, + "csHeight", + cs.Height, + ) + + // A precommit for the previous height? + // These come in while we wait timeoutCommit + if vote.Height+1 == cs.Height && vote.Type == tmproto.PrecommitType { + if cs.Step != cstypes.RoundStepNewHeight { + // Late precommit at prior height is ignored + cs.Logger.Debug("Precommit vote came in after commit timeout and has been ignored", "vote", vote) + return + } + added, err = cs.LastCommit.AddVote(vote) + if !added { + return + } + + cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) + _ = cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) + cs.evsw.FireEvent(types.EventVote, vote) + + // if we can skip timeoutCommit and have all the votes now, + if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { + // go straight to new round (skip timeout commit) + // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) + cs.enterNewRound(cs.Height, 0) + } + + return + } + + // Height mismatch is ignored. + // Not necessarily a bad peer, but not favourable behaviour. 
+ if vote.Height != cs.Height { + cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "peerID", peerID) + return + } + + added, err = cs.Votes.AddVote(vote, peerID) + if !added { + // Either duplicate, or error upon cs.Votes.AddByIndex() + return + } + + _ = cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) + cs.evsw.FireEvent(types.EventVote, vote) + + switch vote.Type { + case tmproto.PrevoteType: + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.ReceivePrevote(cs, vote) + } else { + defaultReceivePrevote(cs, vote) + } + + case tmproto.PrecommitType: + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.ReceivePrecommit(cs, vote) + } + defaultReceivePrecommit(cs, vote) + + default: + panic(fmt.Sprintf("Unexpected vote type %v", vote.Type)) + } + + return added, err +} + +//----------------------------------------------------------------------------- +// Errors + +var ( + ErrInvalidProposalSignature = errors.New("error invalid proposal signature") + ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round") + ErrAddingVote = errors.New("error adding vote") + ErrSignatureFoundInPastBlocks = errors.New("found signature from the same key") + + errPubKeyIsNotSet = errors.New("pubkey is not set. 
Look for \"Can't get private validator pubkey\" errors") +) + +//----------------------------------------------------------------------------- + +var ( + msgQueueSize = 1000 +) + +// msgs from the reactor which may update the state +type msgInfo struct { + Msg Message `json:"msg"` + PeerID p2p.ID `json:"peer_key"` +} + +// internally generated messages which may update the state +type timeoutInfo struct { + Duration time.Duration `json:"duration"` + Height int64 `json:"height"` + Round int32 `json:"round"` + Step cstypes.RoundStepType `json:"step"` +} + +func (ti *timeoutInfo) String() string { + return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step) +} + +// interface to the mempool +type txNotifier interface { + TxsAvailable() <-chan struct{} +} + +// interface to the evidence pool +type evidencePool interface { + // Adds consensus based evidence to the evidence pool where time is the time + // of the block where the offense occurred and the validator set is the current one. + AddEvidenceFromConsensus(evidence types.Evidence) error +} + +//---------------------------------------- +// Public interface + +// SetLogger implements Service. +func (cs *State) SetLogger(l log.Logger) { + cs.BaseService.Logger = l + cs.timeoutTicker.SetLogger(l) +} + +// SetEventBus sets event bus. +func (cs *State) SetEventBus(b *types.EventBus) { + cs.eventBus = b + cs.blockExec.SetEventBus(b) +} + +// StateMetrics sets the metrics. +func StateMetrics(metrics *Metrics) StateOption { + return func(cs *State) { cs.metrics = metrics } +} + +// String returns a string. +func (cs *State) String() string { + // better not to access shared variables + return "ConsensusState" +} + +// GetState returns a copy of the chain state. +func (cs *State) GetState() sm.State { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return cs.state.Copy() +} + +// GetLastHeight returns the last height committed. +// If there were no blocks, returns 0. 
+func (cs *State) GetLastHeight() int64 { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return cs.RoundState.Height - 1 +} + +// GetRoundState returns a shallow copy of the internal consensus state. +func (cs *State) GetRoundState() *cstypes.RoundState { + cs.mtx.RLock() + rs := cs.RoundState // copy + cs.mtx.RUnlock() + return &rs +} + +// GetRoundStateJSON returns a json of RoundState. +func (cs *State) GetRoundStateJSON() ([]byte, error) { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return tmjson.Marshal(cs.RoundState) +} + +// GetRoundStateSimpleJSON returns a json of RoundStateSimple +func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return tmjson.Marshal(cs.RoundState.RoundStateSimple()) +} + +// GetValidators returns a copy of the current validators. +func (cs *State) GetValidators() (int64, []*types.Validator) { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators +} + +// SetPrivValidator sets the private validator account for signing votes. It +// immediately requests pubkey and caches it. +func (cs *State) SetPrivValidator(priv types.PrivValidator) { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + cs.privValidator = priv + + if err := cs.updatePrivValidatorPubKey(); err != nil { + cs.Logger.Error("Can't get private validator pubkey", "err", err) + } +} + +// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing. +func (cs *State) SetTimeoutTicker(timeoutTicker TimeoutTicker) { + cs.mtx.Lock() + cs.timeoutTicker = timeoutTicker + cs.mtx.Unlock() +} + +// LoadCommit loads the commit for a given height. 
func (cs *State) LoadCommit(height int64) *types.Commit {
	cs.mtx.RLock()
	defer cs.mtx.RUnlock()
	// The seen commit is only stored for the latest block; older heights
	// are served from the committed block at height+1.
	if height == cs.blockStore.Height() {
		return cs.blockStore.LoadSeenCommit(height)
	}
	return cs.blockStore.LoadBlockCommit(height)
}

// OnStart loads the latest state via the WAL, and starts the timeout and
// receive routines.
func (cs *State) OnStart() error {
	// We may set the WAL in testing before calling Start, so only OpenWAL if its
	// still the nilWAL.
	if _, ok := cs.wal.(nilWAL); ok {
		if err := cs.loadWalFile(); err != nil {
			return err
		}
	}

	// We may have lost some votes if the process crashed reload from consensus
	// log to catchup.
	if cs.doWALCatchup {
		// The loop runs at most twice: a first replay attempt, and — if the
		// WAL turned out to be corrupted — one repair followed by a retry.
		repairAttempted := false
	LOOP:
		for {
			err := cs.catchupReplay(cs.Height)
			switch {
			case err == nil:
				break LOOP
			case !IsDataCorruptionError(err):
				// Non-corruption replay errors are tolerated: log and start anyway.
				cs.Logger.Error("Error on catchup replay. Proceeding to start State anyway", "err", err)
				break LOOP
			case repairAttempted:
				// Corruption persisted after one repair attempt; give up.
				return err
			}

			cs.Logger.Info("WAL file is corrupted. Attempting repair", "err", err)

			// 1) prep work
			if err := cs.wal.Stop(); err != nil {
				return err
			}
			repairAttempted = true

			// 2) backup original WAL file
			corruptedFile := fmt.Sprintf("%s.CORRUPTED", cs.config.WalFile())
			if err := tmos.CopyFile(cs.config.WalFile(), corruptedFile); err != nil {
				return err
			}
			cs.Logger.Info("Backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile)

			// 3) try to repair (WAL file will be overwritten!)
			if err := repairWalFile(corruptedFile, cs.config.WalFile()); err != nil {
				cs.Logger.Error("Repair failed", "err", err)
				return err
			}
			cs.Logger.Info("Successful repair")

			// reload WAL file
			if err := cs.loadWalFile(); err != nil {
				return err
			}
		}
	}

	if err := cs.evsw.Start(); err != nil {
		return err
	}

	// we need the timeoutRoutine for replay so
	// we don't block on the tick chan.
	// NOTE: we will get a build up of garbage go routines
	// firing on the tockChan until the receiveRoutine is started
	// to deal with them (by that point, at most one will be valid)
	if err := cs.timeoutTicker.Start(); err != nil {
		return err
	}

	// Double Signing Risk Reduction
	if err := cs.checkDoubleSigningRisk(cs.Height); err != nil {
		return err
	}

	// now start the receiveRoutine
	go cs.receiveRoutine(0)

	// schedule the first round!
	// use GetRoundState so we don't race the receiveRoutine for access
	cs.scheduleRound0(cs.GetRoundState())

	return nil
}

// loadWalFile loads WAL data from file. It overwrites cs.wal.
func (cs *State) loadWalFile() error {
	wal, err := cs.OpenWAL(cs.config.WalFile())
	if err != nil {
		cs.Logger.Error("Error loading State wal", "err", err)
		return err
	}
	cs.wal = wal
	return nil
}

// OnStop implements service.Service.
func (cs *State) OnStop() {
	if err := cs.evsw.Stop(); err != nil {
		cs.Logger.Error("error trying to stop eventSwitch", "error", err)
	}
	if err := cs.timeoutTicker.Stop(); err != nil {
		cs.Logger.Error("error trying to stop timeoutTicket", "error", err)
	}
	// WAL is stopped in receiveRoutine.
}

// Wait waits for the the main routine to return.
// NOTE: be sure to Stop() the event switch and drain
// any event channels or this may deadlock
func (cs *State) Wait() {
	// cs.done is closed by receiveRoutine's onExit.
	<-cs.done
}

// OpenWAL opens a file to log all consensus messages and timeouts for
// deterministic accountability.
+func (cs *State) OpenWAL(walFile string) (WAL, error) { + wal, err := NewWAL(walFile) + if err != nil { + cs.Logger.Error("Failed to open WAL", "file", walFile, "err", err) + return nil, err + } + wal.SetLogger(cs.Logger.With("wal", walFile)) + if err := wal.Start(); err != nil { + cs.Logger.Error("Failed to start WAL", "err", err) + return nil, err + } + return wal, nil +} + +//------------------------------------------------------------ +// Public interface for passing messages into the consensus state, possibly causing a state transition. +// If peerID == "", the msg is considered internal. +// Messages are added to the appropriate queue (peer or internal). +// If the queue is full, the function may block. +// TODO: should these return anything or let callers just use events? + +// AddVote inputs a vote. +func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) { + if peerID == "" { + cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""} + } else { + cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID} + } + + // TODO: wait for event?! + return false, nil +} + +// SetProposal inputs a proposal. +func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { + + if peerID == "" { + cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} + } else { + cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID} + } + + // TODO: wait for event?! + return nil +} + +// AddProposalBlockPart inputs a part of the proposal block. +func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error { + + if peerID == "" { + cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} + } else { + cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID} + } + + // TODO: wait for event?! + return nil +} + +// SetProposalAndBlock inputs the proposal and all block parts. 
func (cs *State) SetProposalAndBlock(
	proposal *types.Proposal,
	block *types.Block,
	parts *types.PartSet,
	peerID p2p.ID,
) error {
	if err := cs.SetProposal(proposal, peerID); err != nil {
		return err
	}
	for i := 0; i < int(parts.Total()); i++ {
		part := parts.GetPart(i)
		if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil {
			return err
		}
	}
	return nil
}

//------------------------------------------------------------
// internal functions for managing the state

// updateHeight records the new height on the RoundState and the metrics gauge.
func (cs *State) updateHeight(height int64) {
	cs.metrics.Height.Set(float64(height))
	cs.Height = height
}

// updateRoundStep sets the current round and step without any side effects.
func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) {
	cs.Round = round
	cs.Step = step
}

// enterNewRound(height, 0) at cs.StartTime.
func (cs *State) scheduleRound0(rs *cstypes.RoundState) {
	// cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime)
	sleepDuration := rs.StartTime.Sub(tmtime.Now())
	cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}

// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan)
func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int32, step cstypes.RoundStepType) {
	cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step})
}

// send a msg into the receiveRoutine regarding our own proposal, block part, or vote
func (cs *State) sendInternalMessage(mi msgInfo) {
	select {
	case cs.internalMsgQueue <- mi:
	default:
		// NOTE: using the go-routine means our votes can
		// be processed out of order.
		// TODO: use CList here for strict determinism and
		// attempt push to internalMsgQueue in receiveRoutine
		cs.Logger.Info("Internal msg queue is full. Using a go-routine")
		go func() { cs.internalMsgQueue <- mi }()
	}
}

// Reconstruct LastCommit from SeenCommit, which we saved along with the block,
// (which happens even before saving the state)
func (cs *State) reconstructLastCommit(state sm.State) {
	seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
	if seenCommit == nil {
		panic(fmt.Sprintf("Failed to reconstruct LastCommit: seen commit for height %v not found",
			state.LastBlockHeight))
	}

	lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators)
	if !lastPrecommits.HasTwoThirdsMajority() {
		panic("Failed to reconstruct LastCommit: Does not have +2/3 maj")
	}

	cs.LastCommit = lastPrecommits
}

// Updates State and increments height to match that of state.
// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight.
func (cs *State) updateToState(state sm.State) {
	if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
		panic(fmt.Sprintf("updateToState() expected state height of %v but found %v",
			cs.Height, state.LastBlockHeight))
	}
	if !cs.state.IsEmpty() {
		if cs.state.LastBlockHeight > 0 && cs.state.LastBlockHeight+1 != cs.Height {
			// This might happen when someone else is mutating cs.state.
			// Someone forgot to pass in state.Copy() somewhere?!
			panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
				cs.state.LastBlockHeight+1, cs.Height))
		}
		// NOTE(review): this guards against being at the initial height while
		// already having committed blocks — presumably a sanity check added
		// with custom initial-height support; confirm against upstream.
		if cs.state.LastBlockHeight > 0 && cs.Height == cs.state.InitialHeight {
			panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight %v, expected 0 for initial height %v",
				cs.state.LastBlockHeight, cs.state.InitialHeight))
		}

		// If state isn't further out than cs.state, just ignore.
		// This happens when SwitchToConsensus() is called in the reactor.
		// We don't want to reset e.g. the Votes, but we still want to
		// signal the new round step, because other services (eg. txNotifier)
		// depend on having an up-to-date peer state!
		if state.LastBlockHeight <= cs.state.LastBlockHeight {
			cs.Logger.Info(
				"Ignoring updateToState()",
				"newHeight",
				state.LastBlockHeight+1,
				"oldHeight",
				cs.state.LastBlockHeight+1)
			cs.newStep()
			return
		}
	}

	// Reset fields based on state.
	validators := state.Validators

	switch {
	case state.LastBlockHeight == 0: // Very first commit should be empty.
		cs.LastCommit = (*types.VoteSet)(nil)
	case cs.CommitRound > -1 && cs.Votes != nil: // Otherwise, use cs.Votes
		if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() {
			panic(fmt.Sprintf("Wanted to form a Commit, but Precommits (H/R: %d/%d) didn't have 2/3+: %v",
				state.LastBlockHeight,
				cs.CommitRound,
				cs.Votes.Precommits(cs.CommitRound)))
		}
		cs.LastCommit = cs.Votes.Precommits(cs.CommitRound)
	case cs.LastCommit == nil:
		// NOTE: when Tendermint starts, it has no votes. reconstructLastCommit
		// must be called to reconstruct LastCommit from SeenCommit.
		panic(fmt.Sprintf("LastCommit cannot be empty after initial block (H:%d)",
			state.LastBlockHeight+1,
		))
	}

	// Next desired block height
	height := state.LastBlockHeight + 1
	if height == 1 {
		height = state.InitialHeight
	}

	// RoundState fields
	cs.updateHeight(height)
	cs.updateRoundStep(0, cstypes.RoundStepNewHeight)
	if cs.CommitTime.IsZero() {
		// "Now" makes it easier to sync up dev nodes.
		// We add timeoutCommit to allow transactions
		// to be gathered for the first block.
		// And alternative solution that relies on clocks:
		// cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
		cs.StartTime = cs.config.Commit(tmtime.Now())
	} else {
		cs.StartTime = cs.config.Commit(cs.CommitTime)
	}

	// Clear all per-round proposal/lock/valid-block state for the new height.
	cs.Validators = validators
	cs.Proposal = nil
	cs.ProposalBlock = nil
	cs.ProposalBlockParts = nil
	cs.LockedRound = -1
	cs.LockedBlock = nil
	cs.LockedBlockParts = nil
	cs.ValidRound = -1
	cs.ValidBlock = nil
	cs.ValidBlockParts = nil
	cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
	cs.CommitRound = -1
	cs.LastValidators = state.LastValidators
	cs.TriggeredTimeoutPrecommit = false

	cs.state = state

	// Finally, broadcast RoundState
	cs.newStep()
}

// newStep writes the round-state event to the WAL and publishes it on the
// event bus / event switch (when the bus is already wired up).
func (cs *State) newStep() {
	rs := cs.RoundStateEvent()
	if err := cs.wal.Write(rs); err != nil {
		cs.Logger.Error("Error writing to wal", "err", err)
	}
	cs.nSteps++
	// newStep is called by updateToState in NewState before the eventBus is set!
	if cs.eventBus != nil {
		if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil {
			cs.Logger.Error("Error publishing new round step", "err", err)
		}
		cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
	}
}

//-----------------------------------------
// the main go routines

// receiveRoutine handles messages which may cause state transitions.
// it's argument (n) is the number of messages to process before exiting - use 0 to run forever
// It keeps the RoundState and is the only thing that updates it.
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
// State must be locked before any internal state is updated.
func (cs *State) receiveRoutine(maxSteps int) {
	onExit := func(cs *State) {
		// NOTE: the internalMsgQueue may have signed messages from our
		// priv_val that haven't hit the WAL, but its ok because
		// priv_val tracks LastSig

		// close wal now that we're done writing to it
		if err := cs.wal.Stop(); err != nil {
			cs.Logger.Error("error trying to stop wal", "error", err)
		}
		cs.wal.Wait()

		close(cs.done)
	}

	defer func() {
		if r := recover(); r != nil {
			cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
			// stop gracefully
			//
			// NOTE: We most probably shouldn't be running any further when there is
			// some unexpected panic. Some unknown error happened, and so we don't
			// know if that will result in the validator signing an invalid thing. It
			// might be worthwhile to explore a mechanism for manual resuming via
			// some console or secure RPC system, but for now, halting the chain upon
			// unexpected consensus bugs sounds like the better option.
			onExit(cs)
		}
	}()

	for {
		if maxSteps > 0 {
			if cs.nSteps >= maxSteps {
				cs.Logger.Info("reached max steps. exiting receive routine")
				cs.nSteps = 0
				return
			}
		}
		// Snapshot the round state so timeouts are checked against the state
		// as of when they were scheduled.
		rs := cs.RoundState
		var mi msgInfo

		select {
		case <-cs.txNotifier.TxsAvailable():
			cs.handleTxsAvailable()
		case mi = <-cs.peerMsgQueue:
			// Peer messages are written to the WAL without fsync.
			if err := cs.wal.Write(mi); err != nil {
				cs.Logger.Error("Error writing to wal", "err", err)
			}
			// handles proposals, block parts, votes
			// may generate internal events (votes, complete proposals, 2/3 majorities)
			cs.handleMsg(mi)
		case mi = <-cs.internalMsgQueue:
			// Internal messages (our own votes/proposals) must hit disk
			// before being processed — hence WriteSync, and a panic on failure.
			err := cs.wal.WriteSync(mi) // NOTE: fsync
			if err != nil {
				panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node", mi, err))
			}

			if _, ok := mi.Msg.(*VoteMessage); ok {
				// we actually want to simulate failing during
				// the previous WriteSync, but this isn't easy to do.
				// Equivalent would be to fail here and manually remove
				// some bytes from the end of the wal.
				fail.Fail() // XXX
			}

			// handles proposals, block parts, votes
			cs.handleMsg(mi)
		case ti := <-cs.timeoutTicker.Chan(): // tockChan:
			if err := cs.wal.Write(ti); err != nil {
				cs.Logger.Error("Error writing to wal", "err", err)
			}
			// if the timeout is relevant to the rs
			// go to the next step
			cs.handleTimeout(ti, rs)
		case <-cs.Quit():
			onExit(cs)
			return
		}
	}
}

// handleTimeout performs the state transition associated with an expired
// timeout, after checking it is still relevant to the current height/round/step.
func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
	cs.Logger.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)

	// timeouts must be for current height, round, step
	if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) {
		cs.Logger.Debug("Ignoring tock because we're ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step)
		return
	}

	// the timeout will now cause a state transition
	cs.mtx.Lock()
	defer cs.mtx.Unlock()

	switch ti.Step {
	case cstypes.RoundStepNewHeight:
		// NewRound event fired from enterNewRound.
		// XXX: should we fire timeout here (for timeout commit)?
		cs.enterNewRound(ti.Height, 0)
	case cstypes.RoundStepNewRound:
		cs.enterPropose(ti.Height, 0)
	case cstypes.RoundStepPropose:
		if err := cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()); err != nil {
			cs.Logger.Error("Error publishing timeout propose", "err", err)
		}
		cs.enterPrevote(ti.Height, ti.Round)
	case cstypes.RoundStepPrevoteWait:
		if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil {
			cs.Logger.Error("Error publishing timeout wait", "err", err)
		}
		cs.enterPrecommit(ti.Height, ti.Round)
	case cstypes.RoundStepPrecommitWait:
		if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil {
			cs.Logger.Error("Error publishing timeout wait", "err", err)
		}
		cs.enterPrecommit(ti.Height, ti.Round)
		cs.enterNewRound(ti.Height, ti.Round+1)
	default:
		panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step))
	}

}

// handleTxsAvailable reacts to the mempool signalling that transactions are
// available, possibly triggering an (early) proposal for round 0.
func (cs *State) handleTxsAvailable() {
	cs.mtx.Lock()
	defer cs.mtx.Unlock()

	// We only need to do this for round 0.
	if cs.Round != 0 {
		return
	}

	switch cs.Step {
	case cstypes.RoundStepNewHeight: // timeoutCommit phase
		if cs.needProofBlock(cs.Height) {
			// enterPropose will be called by enterNewRound
			return
		}

		// +1ms to ensure RoundStepNewRound timeout always happens after RoundStepNewHeight
		timeoutCommit := cs.StartTime.Sub(tmtime.Now()) + 1*time.Millisecond
		cs.scheduleTimeout(timeoutCommit, cs.Height, 0, cstypes.RoundStepNewRound)
	case cstypes.RoundStepNewRound: // after timeoutCommit
		cs.enterPropose(cs.Height, 0)
	}
}

//-----------------------------------------------------------------------------
// State functions
// Used internally by handleTimeout and handleMsg to make state transitions

// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit),
// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1)
// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1)
// Enter: +2/3 precommits for nil at (height,round-1)
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
// NOTE: cs.StartTime was already set for height.
func (cs *State) enterNewRound(height int64, round int32) {
	logger := cs.Logger.With("height", height, "round", round)

	if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
		logger.Debug(fmt.Sprintf(
			"enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v",
			height,
			round,
			cs.Height,
			cs.Round,
			cs.Step))
		return
	}

	if now := tmtime.Now(); cs.StartTime.After(now) {
		logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
	}

	logger.Info(fmt.Sprintf("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

	// Increment validators if necessary
	validators := cs.Validators
	if cs.Round < round {
		validators = validators.Copy()
		validators.IncrementProposerPriority(tmmath.SafeSubInt32(round, cs.Round))
	}

	// Setup new round
	// we don't fire newStep for this step,
	// but we fire an event, so update the round step first
	cs.updateRoundStep(round, cstypes.RoundStepNewRound)
	cs.Validators = validators
	if round == 0 {
		// We've already reset these upon new height,
		// and meanwhile we might have received a proposal
		// for round 0.
	} else {
		logger.Info("Resetting Proposal info")
		cs.Proposal = nil
		cs.ProposalBlock = nil
		cs.ProposalBlockParts = nil
	}
	cs.Votes.SetRound(tmmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping
	cs.TriggeredTimeoutPrecommit = false

	if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil {
		cs.Logger.Error("Error publishing new round", "err", err)
	}
	cs.metrics.Rounds.Set(float64(round))

	// Wait for txs to be available in the mempool
	// before we enterPropose in round 0. If the last block changed the app hash,
	// we may need an empty "proof" block, and enterPropose immediately.
	waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height)
	if waitForTxs {
		if cs.config.CreateEmptyBlocksInterval > 0 {
			cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round,
				cstypes.RoundStepNewRound)
		}
	} else {
		cs.enterPropose(height, round)
	}
}

// needProofBlock returns true on the first height (so the genesis app hash is signed right away)
// and where the last block (height-1) caused the app hash to change
func (cs *State) needProofBlock(height int64) bool {
	if height == cs.state.InitialHeight {
		return true
	}

	lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
	if lastBlockMeta == nil {
		panic(fmt.Sprintf("needProofBlock: last block meta for height %d not found", height-1))
	}
	return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash)
}

// isProposer reports whether the given address is the current round's proposer.
func (cs *State) isProposer(address []byte) bool {
	return bytes.Equal(cs.Validators.GetProposer().Address, address)
}

// defaultDecideProposal chooses a block (the valid block if one exists,
// otherwise a freshly created one), signs a proposal for it, and feeds the
// proposal plus all block parts back through the internal message queue.
func (cs *State) defaultDecideProposal(height int64, round int32) {
	var block *types.Block
	var blockParts *types.PartSet

	// Decide on block
	if cs.ValidBlock != nil {
		// If there is valid block, choose that.
		block, blockParts = cs.ValidBlock, cs.ValidBlockParts
	} else {
		// Create a new proposal block from state/txs from the mempool.
		block, blockParts = cs.createProposalBlock()
		if block == nil {
			return
		}
	}

	// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
	// and the privValidator will refuse to sign anything.
	if err := cs.wal.FlushAndSync(); err != nil {
		cs.Logger.Error("Error flushing to disk")
	}

	// Make proposal
	propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
	proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID)
	p := proposal.ToProto()
	if err := cs.privValidator.SignProposal(cs.state.ChainID, p); err == nil {
		proposal.Signature = p.Signature

		// send proposal and block parts on internal msg queue
		cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""})
		for i := 0; i < int(blockParts.Total()); i++ {
			part := blockParts.GetPart(i)
			cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""})
		}
		cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
		cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
	} else if !cs.replayMode {
		cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
	}
}

// Returns true if the proposal block is complete &&
// (if POLRound was proposed, we have +2/3 prevotes from there).
func (cs *State) isProposalComplete() bool {
	if cs.Proposal == nil || cs.ProposalBlock == nil {
		return false
	}
	// we have the proposal. if there's a POLRound,
	// make sure we have the prevotes from it too
	if cs.Proposal.POLRound < 0 {
		return true
	}
	// if this is false the proposer is lying or we haven't received the POL yet
	return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()

}

// Create the next block to propose and return it. Returns nil block upon error.
//
// We really only need to return the parts, but the block is returned for
// convenience so we can log the proposal block.
//
// NOTE: keep it side-effect free for clarity.
// CONTRACT: cs.privValidator is not nil.
func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
	if cs.privValidator == nil {
		panic("entered createProposalBlock with privValidator being nil")
	}

	// Decide which LastCommit to embed in the new block.
	var commit *types.Commit
	switch {
	case cs.Height == cs.state.InitialHeight:
		// We're creating a proposal for the first block.
		// The commit is empty, but not nil.
		commit = types.NewCommit(0, 0, types.BlockID{}, nil)
	case cs.LastCommit.HasTwoThirdsMajority():
		// Make the commit from LastCommit.
		commit = cs.LastCommit.MakeCommit()
	default: // This shouldn't happen.
		cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
		return
	}

	if cs.privValidatorPubKey == nil {
		// If this node is a validator & proposer in the current round, it will
		// miss the opportunity to create a block.
		cs.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
		return
	}
	proposerAddr := cs.privValidatorPubKey.Address()

	return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr)
}

// Enter: any +2/3 prevotes at next round.
func (cs *State) enterPrevoteWait(height int64, round int32) {
	logger := cs.Logger.With("height", height, "round", round)

	// Guard: only act for the current height, a round that is not in the
	// past, and a step strictly before PrevoteWait.
	if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
		logger.Debug(fmt.Sprintf(
			"enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v",
			height,
			round,
			cs.Height,
			cs.Round,
			cs.Step))
		return
	}
	if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
		panic(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
	}
	logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

	defer func() {
		// Done enterPrevoteWait:
		cs.updateRoundStep(round, cstypes.RoundStepPrevoteWait)
		cs.newStep()
	}()

	// Wait for some more prevotes; enterPrecommit
	cs.scheduleTimeout(cs.config.Prevote(round), height, round, cstypes.RoundStepPrevoteWait)
}

// Enter: any +2/3 precommits for next round.
func (cs *State) enterPrecommitWait(height int64, round int32) {
	logger := cs.Logger.With("height", height, "round", round)

	// TriggeredTimeoutPrecommit (not the step) is the idempotency marker here,
	// since this can race with entering a new round.
	if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) {
		logger.Debug(
			fmt.Sprintf(
				"enterPrecommitWait(%v/%v): Invalid args. "+
					"Current state is Height/Round: %v/%v/, TriggeredTimeoutPrecommit:%v",
				height, round, cs.Height, cs.Round, cs.TriggeredTimeoutPrecommit))
		return
	}
	if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
		panic(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
	}
	logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

	defer func() {
		// Done enterPrecommitWait:
		cs.TriggeredTimeoutPrecommit = true
		cs.newStep()
	}()

	// Wait for some more precommits; enterNewRound
	cs.scheduleTimeout(cs.config.Precommit(round), height, round, cstypes.RoundStepPrecommitWait)
}

// Enter: +2/3 precommits for block
func (cs *State) enterCommit(height int64, commitRound int32) {
	logger := cs.Logger.With("height", height, "commitRound", commitRound)

	if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
		logger.Debug(fmt.Sprintf(
			"enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v",
			height,
			commitRound,
			cs.Height,
			cs.Round,
			cs.Step))
		return
	}
	logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))

	defer func() {
		// Done enterCommit:
		// keep cs.Round the same, commitRound points to the right Precommits set.
		cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit)
		cs.CommitRound = commitRound
		cs.CommitTime = tmtime.Now()
		cs.newStep()

		// Maybe finalize immediately.
		cs.tryFinalizeCommit(height)
	}()

	blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
	if !ok {
		panic("RunActionCommit() expects +2/3 precommits")
	}

	// The Locked* fields no longer matter.
	// Move them over to ProposalBlock if they match the commit hash,
	// otherwise they'll be cleared in updateToState.
	if cs.LockedBlock.HashesTo(blockID.Hash) {
		logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash)
		cs.ProposalBlock = cs.LockedBlock
		cs.ProposalBlockParts = cs.LockedBlockParts
	}

	// If we don't have the block being committed, set up to get it.
	if !cs.ProposalBlock.HashesTo(blockID.Hash) {
		if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
			logger.Info(
				"Commit is for a block we don't know about. Set ProposalBlock=nil",
				"proposal",
				cs.ProposalBlock.Hash(),
				"commit",
				blockID.Hash)
			// We're getting the wrong block.
			// Set up ProposalBlockParts and keep waiting.
			cs.ProposalBlock = nil
			cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
			if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil {
				cs.Logger.Error("Error publishing valid block", "err", err)
			}
			cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
		}
		// else {
		// We just need to keep waiting.
		// }
	}
}

// If we have the block AND +2/3 commits for it, finalize.
func (cs *State) tryFinalizeCommit(height int64) {
	logger := cs.Logger.With("height", height)

	if cs.Height != height {
		panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
	}

	blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
	if !ok || len(blockID.Hash) == 0 {
		logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for .")
		return
	}
	if !cs.ProposalBlock.HashesTo(blockID.Hash) {
		// TODO: this happens every time if we're not a validator (ugly logs)
		// TODO: ^^ wait, why does it matter that we're a validator?
		logger.Info(
			"Attempt to finalize failed. We don't have the commit block.",
			"proposal-block",
			cs.ProposalBlock.Hash(),
			"commit-block",
			blockID.Hash)
		return
	}

	// go
	cs.finalizeCommit(height)
}

// Increment height and goto cstypes.RoundStepNewHeight
func (cs *State) finalizeCommit(height int64) {
	if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
		cs.Logger.Debug(fmt.Sprintf(
			"finalizeCommit(%v): Invalid args. Current step: %v/%v/%v",
			height,
			cs.Height,
			cs.Round,
			cs.Step))
		return
	}

	blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
	block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts

	if !ok {
		panic("Cannot finalizeCommit, commit does not have two thirds majority")
	}
	if !blockParts.HasHeader(blockID.PartSetHeader) {
		panic("Expected ProposalBlockParts header to be commit header")
	}
	if !block.HashesTo(blockID.Hash) {
		panic("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")
	}
	if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
		panic(fmt.Errorf("+2/3 committed an invalid block: %w", err))
	}

	cs.Logger.Info("Finalizing commit of block with N txs",
		"height", block.Height,
		"hash", block.Hash(),
		"root", block.AppHash,
		"N", len(block.Txs))
	cs.Logger.Info(fmt.Sprintf("%v", block))

	fail.Fail() // XXX

	// Save to blockStore.
	// NOTE(ordering): the block MUST be persisted before the WAL EndHeight
	// message below; crash-recovery depends on it.
	if cs.blockStore.Height() < block.Height {
		// NOTE: the seenCommit is local justification to commit this block,
		// but may differ from the LastCommit included in the next block
		precommits := cs.Votes.Precommits(cs.CommitRound)
		seenCommit := precommits.MakeCommit()
		cs.blockStore.SaveBlock(block, blockParts, seenCommit)
	} else {
		// Happens during replay if we already saved the block but didn't commit
		cs.Logger.Info("Calling finalizeCommit on already stored block", "height", block.Height)
	}

	fail.Fail() // XXX

	// Write EndHeightMessage{} for this height, implying that the blockstore
	// has saved the block.
	//
	// If we crash before writing this EndHeightMessage{}, we will recover by
	// running ApplyBlock during the ABCI handshake when we restart. If we
	// didn't save the block to the blockstore before writing
	// EndHeightMessage{}, we'd have to change WAL replay -- currently it
	// complains about replaying for heights where an #ENDHEIGHT entry already
	// exists.
	//
	// Either way, the State should not be resumed until we
	// successfully call ApplyBlock (ie. later here, or in Handshake after
	// restart).
	endMsg := EndHeightMessage{height}
	if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync
		panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node",
			endMsg, err))
	}

	fail.Fail() // XXX

	// Create a copy of the state for staging and an event cache for txs.
	stateCopy := cs.state.Copy()

	// Execute and commit the block, update and save the state, and update the mempool.
	// NOTE The block.AppHash wont reflect these txs until the next block.
	var err error
	var retainHeight int64
	stateCopy, retainHeight, err = cs.blockExec.ApplyBlock(
		stateCopy,
		types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()},
		block)
	if err != nil {
		cs.Logger.Error("Error on ApplyBlock", "err", err)
		return
	}

	fail.Fail() // XXX

	// Prune old heights, if requested by ABCI app.
	if retainHeight > 0 {
		pruned, err := cs.pruneBlocks(retainHeight)
		if err != nil {
			cs.Logger.Error("Failed to prune blocks", "retainHeight", retainHeight, "err", err)
		} else {
			cs.Logger.Info("Pruned blocks", "pruned", pruned, "retainHeight", retainHeight)
		}
	}

	// must be called before we update state
	cs.recordMetrics(height, block)

	// NewHeightStep!
	cs.updateToState(stateCopy)

	fail.Fail() // XXX

	// Private validator might have changed it's key pair => refetch pubkey.
	if err := cs.updatePrivValidatorPubKey(); err != nil {
		cs.Logger.Error("Can't get private validator pubkey", "err", err)
	}

	// cs.StartTime is already set.
	// Schedule Round0 to start soon.
	cs.scheduleRound0(&cs.RoundState)

	// By here,
	// * cs.Height has been increment to height+1
	// * cs.Step is now cstypes.RoundStepNewHeight
	// * cs.StartTime is set to when we will start round0.
}

// pruneBlocks removes blocks and associated state below retainHeight from the
// block store and state store; returns the number of blocks pruned.
func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) {
	base := cs.blockStore.Base()
	if retainHeight <= base {
		// Nothing below retainHeight to remove.
		return 0, nil
	}
	pruned, err := cs.blockStore.PruneBlocks(retainHeight)
	if err != nil {
		return 0, fmt.Errorf("failed to prune block store: %w", err)
	}
	err = cs.blockExec.Store().PruneStates(base, retainHeight)
	if err != nil {
		return 0, fmt.Errorf("failed to prune state database: %w", err)
	}
	return pruned, nil
}

// recordMetrics updates the Prometheus consensus metrics for the block just
// committed at the given height. Must be called before cs.state is updated.
func (cs *State) recordMetrics(height int64, block *types.Block) {
	cs.metrics.Validators.Set(float64(cs.Validators.Size()))
	cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))

	var (
		missingValidators      int
		missingValidatorsPower int64
	)
	// height=0 -> MissingValidators and MissingValidatorsPower are both 0.
	// Remember that the first LastCommit is intentionally empty, so it's not
	// fair to increment missing validators number.
	if height > cs.state.InitialHeight {
		// Sanity check that commit size matches validator set size - only applies
		// after first block.
		var (
			commitSize = block.LastCommit.Size()
			valSetLen  = len(cs.LastValidators.Validators)
			address    types.Address
		)
		if commitSize != valSetLen {
			panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
				commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators))
		}

		if cs.privValidator != nil {
			if cs.privValidatorPubKey == nil {
				// Metrics won't be updated, but it's not critical.
				cs.Logger.Error(fmt.Sprintf("recordMetrics: %v", errPubKeyIsNotSet))
			} else {
				address = cs.privValidatorPubKey.Address()
			}
		}

		for i, val := range cs.LastValidators.Validators {
			commitSig := block.LastCommit.Signatures[i]
			if commitSig.Absent() {
				missingValidators++
				missingValidatorsPower += val.VotingPower
			}

			// Per-validator metrics for our own validator key only.
			if bytes.Equal(val.Address, address) {
				label := []string{
					"validator_address", val.Address.String(),
				}
				cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower))
				if commitSig.ForBlock() {
					cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height))
				} else {
					cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1))
				}
			}

		}
	}
	cs.metrics.MissingValidators.Set(float64(missingValidators))
	cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower))

	// NOTE: byzantine validators power and count is only for consensus evidence i.e. duplicate vote
	var (
		byzantineValidatorsPower = int64(0)
		byzantineValidatorsCount = int64(0)
	)
	for _, ev := range block.Evidence.Evidence {
		if dve, ok := ev.(*types.DuplicateVoteEvidence); ok {
			if _, val := cs.Validators.GetByAddress(dve.VoteA.ValidatorAddress); val != nil {
				byzantineValidatorsCount++
				byzantineValidatorsPower += val.VotingPower
			}
		}
	}
	cs.metrics.ByzantineValidators.Set(float64(byzantineValidatorsCount))
	cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower))

	if height > 1 {
		lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
		if lastBlockMeta != nil {
			cs.metrics.BlockIntervalSeconds.Observe(
				block.Time.Sub(lastBlockMeta.Header.Time).Seconds(),
			)
		}
	}

	cs.metrics.NumTxs.Set(float64(len(block.Data.Txs)))
	cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs)))
	cs.metrics.BlockSizeBytes.Set(float64(block.Size()))
	cs.metrics.CommittedHeight.Set(float64(block.Height))
}

//-----------------------------------------------------------------------------

//
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit,
// once we have the full block.
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) {
	height, round, part := msg.Height, msg.Round, msg.Part

	// Blocks might be reused, so round mismatch is OK
	if cs.Height != height {
		cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round)
		return false, nil
	}

	// We're not expecting a block part.
	if cs.ProposalBlockParts == nil {
		// NOTE: this can happen when we've gone to a higher round and
		// then receive parts from the previous round - not necessarily a bad peer.
		cs.Logger.Info("Received a block part when we're not expecting any",
			"height", height, "round", round, "index", part.Index, "peer", peerID)
		return false, nil
	}

	added, err = cs.ProposalBlockParts.AddPart(part)
	if err != nil {
		return added, err
	}
	// Reject proposals whose accumulated parts exceed the consensus block size limit.
	if cs.ProposalBlockParts.ByteSize() > cs.state.ConsensusParams.Block.MaxBytes {
		return added, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)",
			cs.ProposalBlockParts.ByteSize(), cs.state.ConsensusParams.Block.MaxBytes,
		)
	}
	if added && cs.ProposalBlockParts.IsComplete() {
		// All parts received: reassemble and decode the proto block.
		bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader())
		if err != nil {
			return added, err
		}

		var pbb = new(tmproto.Block)
		err = proto.Unmarshal(bz, pbb)
		if err != nil {
			return added, err
		}

		block, err := types.BlockFromProto(pbb)
		if err != nil {
			return added, err
		}

		cs.ProposalBlock = block
		// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
		cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
		if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil {
			cs.Logger.Error("Error publishing event complete proposal", "err", err)
		}

		// Update Valid* if we can.
		prevotes := cs.Votes.Prevotes(cs.Round)
		blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
		if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
			if cs.ProposalBlock.HashesTo(blockID.Hash) {
				cs.Logger.Info("Updating valid block to new proposal block",
					"valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash())
				cs.ValidRound = cs.Round
				cs.ValidBlock = cs.ProposalBlock
				cs.ValidBlockParts = cs.ProposalBlockParts
			}
			// TODO: In case there is +2/3 majority in Prevotes set for some
			// block and cs.ProposalBlock contains different block, either
			// proposer is faulty or voting power of faulty processes is more
			// than 1/3. We should trigger in the future accountability
			// procedure at this point.
		}

		if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
			// Move onto the next step
			cs.enterPrevote(height, cs.Round)
			if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added
				cs.enterPrecommit(height, cs.Round)
			}
		} else if cs.Step == cstypes.RoundStepCommit {
			// If we're waiting on the proposal block...
			cs.tryFinalizeCommit(height)
		}
		return added, nil
	}
	return added, nil
}

// Attempt to add the vote. if its a duplicate signature, dupeout the validator
func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
	added, err := cs.addVote(vote, peerID)
	if err != nil {
		// If the vote height is off, we'll just ignore it,
		// But if it's a conflicting sig, add it to the cs.evpool.
		// If it's otherwise invalid, punish peer.
		// nolint: gocritic
		if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
			if cs.privValidatorPubKey == nil {
				return false, errPubKeyIsNotSet
			}

			if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) {
				cs.Logger.Error(
					"Found conflicting vote from ourselves. Did you unsafe_reset a validator?",
					"height",
					vote.Height,
					"round",
					vote.Round,
					"type",
					vote.Type)
				return added, err
			}
			// Evidence timestamp: genesis time for the first height, otherwise
			// the median time of the last commit.
			var timestamp time.Time
			if voteErr.VoteA.Height == cs.state.InitialHeight {
				timestamp = cs.state.LastBlockTime // genesis time
			} else {
				timestamp = sm.MedianTime(cs.LastCommit.MakeCommit(), cs.LastValidators)
			}
			ev := types.NewDuplicateVoteEvidence(voteErr.VoteA, voteErr.VoteB, timestamp, cs.Validators)
			evidenceErr := cs.evpool.AddEvidenceFromConsensus(ev)
			if evidenceErr != nil {
				cs.Logger.Error("Failed to add evidence to the evidence pool", "err", evidenceErr)
			}
			return added, err
		} else if err == types.ErrVoteNonDeterministicSignature {
			cs.Logger.Debug("Vote has non-deterministic signature", "err", err)
		} else {
			// Either
			// 1) bad peer OR
			// 2) not a bad peer? this can also err sometimes with "Unexpected step" OR
			// 3) tmkms use with multiple validators connecting to a single tmkms instance
			// (https://github.com/tendermint/tendermint/issues/3839).
			cs.Logger.Info("Error attempting to add vote", "err", err)
			return added, ErrAddingVote
		}
	}
	return added, nil
}

//-----------------------------------------------------------------------------

// CONTRACT: cs.privValidator is not nil.
func (cs *State) signVote(
	msgType tmproto.SignedMsgType,
	hash []byte,
	header types.PartSetHeader,
) (*types.Vote, error) {
	// Flush the WAL. Otherwise, we may not recompute the same vote to sign,
	// and the privValidator will refuse to sign anything.
	if err := cs.wal.FlushAndSync(); err != nil {
		return nil, err
	}

	if cs.privValidatorPubKey == nil {
		return nil, errPubKeyIsNotSet
	}
	addr := cs.privValidatorPubKey.Address()
	valIdx, _ := cs.Validators.GetByAddress(addr)

	vote := &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   valIdx,
		Height:           cs.Height,
		Round:            cs.Round,
		Timestamp:        cs.voteTime(),
		Type:             msgType,
		BlockID:          types.BlockID{Hash: hash, PartSetHeader: header},
	}
	v := vote.ToProto()
	err := cs.privValidator.SignVote(cs.state.ChainID, v)
	vote.Signature = v.Signature

	return vote, err
}

// voteTime returns the timestamp to place in our vote: the later of now and
// the minimum time implied by BFT time (block time + TimeIotaMs).
func (cs *State) voteTime() time.Time {
	now := tmtime.Now()
	minVoteTime := now
	// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
	// even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/.
	timeIota := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond
	if cs.LockedBlock != nil {
		// See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html
		minVoteTime = cs.LockedBlock.Time.Add(timeIota)
	} else if cs.ProposalBlock != nil {
		minVoteTime = cs.ProposalBlock.Time.Add(timeIota)
	}

	if now.After(minVoteTime) {
		return now
	}
	return minVoteTime
}

// sign the vote and publish on internalMsgQueue
func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
	if cs.privValidator == nil { // the node does not have a key
		return nil
	}

	if cs.privValidatorPubKey == nil {
		// Vote won't be signed, but it's not critical.
		cs.Logger.Error(fmt.Sprintf("signAddVote: %v", errPubKeyIsNotSet))
		return nil
	}

	// If the node not in the validator set, do nothing.
	if !cs.Validators.HasAddress(cs.privValidatorPubKey.Address()) {
		return nil
	}

	// TODO: pass pubKey to signVote
	vote, err := cs.signVote(msgType, hash, header)
	if err == nil {
		cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
		cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
		return vote
	}
	// if !cs.replayMode {
	cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
	//}
	return nil
}

// updatePrivValidatorPubKey get's the private validator public key and
// memoizes it. This func returns an error if the private validator is not
// responding or responds with an error.
func (cs *State) updatePrivValidatorPubKey() error {
	if cs.privValidator == nil {
		return nil
	}

	pubKey, err := cs.privValidator.GetPubKey()
	if err != nil {
		return err
	}
	cs.privValidatorPubKey = pubKey
	return nil
}

// look back to check existence of the node's consensus votes before joining consensus
func (cs *State) checkDoubleSigningRisk(height int64) error {
	if cs.privValidator != nil && cs.privValidatorPubKey != nil && cs.config.DoubleSignCheckHeight > 0 && height > 0 {
		valAddr := cs.privValidatorPubKey.Address()
		doubleSignCheckHeight := cs.config.DoubleSignCheckHeight
		if doubleSignCheckHeight > height {
			doubleSignCheckHeight = height
		}
		// Scan the last doubleSignCheckHeight-1 seen commits for our own signature.
		for i := int64(1); i < doubleSignCheckHeight; i++ {
			lastCommit := cs.blockStore.LoadSeenCommit(height - i)
			if lastCommit != nil {
				for sigIdx, s := range lastCommit.Signatures {
					if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) {
						cs.Logger.Info("Found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i)
						return ErrSignatureFoundInPastBlocks
					}
				}
			}
		}
	}
	return nil
}

//---------------------------------------------------------

func CompareHRS(h1 int64, r1 int32, s1 cstypes.RoundStepType, h2 int64, r2 int32, s2
cstypes.RoundStepType) int { + if h1 < h2 { + return -1 + } else if h1 > h2 { + return 1 + } + if r1 < r2 { + return -1 + } else if r1 > r2 { + return 1 + } + if s1 < s2 { + return -1 + } else if s1 > s2 { + return 1 + } + return 0 +} + +// repairWalFile decodes messages from src (until the decoder errors) and +// writes them to dst. +func repairWalFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Open(dst) + if err != nil { + return err + } + defer out.Close() + + var ( + dec = NewWALDecoder(in) + enc = NewWALEncoder(out) + ) + + // best-case repair (until first error is encountered) + for { + msg, err := dec.Decode() + if err != nil { + break + } + + err = enc.Encode(msg) + if err != nil { + return fmt.Errorf("failed to encode msg: %w", err) + } + } + + return nil +} diff --git a/test/maverick/consensus/ticker.go b/test/maverick/consensus/ticker.go new file mode 100644 index 0000000000..3c6743f88e --- /dev/null +++ b/test/maverick/consensus/ticker.go @@ -0,0 +1,134 @@ +package consensus + +import ( + "time" + + "github.com/lazyledger/lazyledger-core/libs/log" + "github.com/lazyledger/lazyledger-core/libs/service" +) + +var ( + tickTockBufferSize = 10 +) + +// TimeoutTicker is a timer that schedules timeouts +// conditional on the height/round/step in the timeoutInfo. +// The timeoutInfo.Duration may be non-positive. +type TimeoutTicker interface { + Start() error + Stop() error + Chan() <-chan timeoutInfo // on which to receive a timeout + ScheduleTimeout(ti timeoutInfo) // reset the timer + + SetLogger(log.Logger) +} + +// timeoutTicker wraps time.Timer, +// scheduling timeouts only for greater height/round/step +// than what it's already seen. +// Timeouts are scheduled along the tickChan, +// and fired on the tockChan. 
type timeoutTicker struct {
	service.BaseService

	timer    *time.Timer
	tickChan chan timeoutInfo // for scheduling timeouts
	tockChan chan timeoutInfo // for notifying about them
}

// NewTimeoutTicker returns a new TimeoutTicker.
func NewTimeoutTicker() TimeoutTicker {
	tt := &timeoutTicker{
		timer:    time.NewTimer(0),
		tickChan: make(chan timeoutInfo, tickTockBufferSize),
		tockChan: make(chan timeoutInfo, tickTockBufferSize),
	}
	tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt)
	tt.stopTimer() // don't want to fire until the first scheduled timeout
	return tt
}

// OnStart implements service.Service. It starts the timeout routine.
func (t *timeoutTicker) OnStart() error {

	go t.timeoutRoutine()

	return nil
}

// OnStop implements service.Service. It stops the timeout routine.
func (t *timeoutTicker) OnStop() {
	t.BaseService.OnStop()
	t.stopTimer()
}

// Chan returns a channel on which timeouts are sent.
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
	return t.tockChan
}

// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
// The timeoutRoutine is always available to read from tickChan, so this won't block.
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
	t.tickChan <- ti
}

//-------------------------------------------------------------

// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
	// Stop() returns false if it was already fired or was stopped
	if !t.timer.Stop() {
		// Drain the channel non-blockingly so a stale fire is not delivered
		// to the next receiver.
		select {
		case <-t.timer.C:
		default:
			t.Logger.Debug("Timer already stopped")
		}
	}
}

// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
	t.Logger.Debug("Starting timeout routine")
	var ti timeoutInfo
	for {
		select {
		case newti := <-t.tickChan:
			t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti)

			// ignore tickers for old height/round/step
			if newti.Height < ti.Height {
				continue
			} else if newti.Height == ti.Height {
				if newti.Round < ti.Round {
					continue
				} else if newti.Round == ti.Round {
					if ti.Step > 0 && newti.Step <= ti.Step {
						continue
					}
				}
			}

			// stop the last timer
			t.stopTimer()

			// update timeoutInfo and reset timer
			// NOTE time.Timer allows duration to be non-positive
			ti = newti
			t.timer.Reset(ti.Duration)
			t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
		case <-t.timer.C:
			t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
			// go routine here guarantees timeoutRoutine doesn't block.
			// Determinism comes from playback in the receiveRoutine.
			// We can eliminate it by merging the timeoutRoutine into receiveRoutine
			// and managing the timeouts ourselves with a millisecond ticker
			go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
		case <-t.Quit():
			return
		}
	}
}
diff --git a/test/maverick/consensus/wal.go b/test/maverick/consensus/wal.go
new file mode 100644
index 0000000000..bb0755a6d2
--- /dev/null
+++ b/test/maverick/consensus/wal.go
@@ -0,0 +1,437 @@
package consensus

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
	"path/filepath"
	"time"

	"github.com/gogo/protobuf/proto"

	auto "github.com/lazyledger/lazyledger-core/libs/autofile"
	// tmjson "github.com/lazyledger/lazyledger-core/libs/json"
	"github.com/lazyledger/lazyledger-core/libs/log"
	tmos "github.com/lazyledger/lazyledger-core/libs/os"
	"github.com/lazyledger/lazyledger-core/libs/service"
	tmcons "github.com/lazyledger/lazyledger-core/proto/tendermint/consensus"
	tmtime "github.com/lazyledger/lazyledger-core/types/time"
)

const (
	// time.Time + max consensus msg size
	maxMsgSizeBytes = maxMsgSize + 24

	// how often the WAL should be sync'd during period sync'ing
	walDefaultFlushInterval = 2 * time.Second
)

//--------------------------------------------------------
// types and functions for saving consensus messages

// TimedWALMessage wraps WALMessage and adds Time for debugging purposes.
type TimedWALMessage struct {
	Time time.Time  `json:"time"`
	Msg  WALMessage `json:"msg"`
}

// EndHeightMessage marks the end of the given height inside WAL.
// @internal used by scripts/wal2json util.
type EndHeightMessage struct {
	Height int64 `json:"height"`
}

// WALMessage is any message that can be stored in the WAL.
type WALMessage interface{}

// func init() {
// tmjson.RegisterType(msgInfo{}, "tendermint/wal/MsgInfo")
// tmjson.RegisterType(timeoutInfo{}, "tendermint/wal/TimeoutInfo")
// tmjson.RegisterType(EndHeightMessage{}, "tendermint/wal/EndHeightMessage")
// }

//--------------------------------------------------------
// Simple write-ahead logger

// WAL is an interface for any write-ahead logger.
type WAL interface {
	Write(WALMessage) error
	WriteSync(WALMessage) error
	FlushAndSync() error

	SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error)

	// service methods
	Start() error
	Stop() error
	Wait() error
}

// Write ahead logger writes msgs to disk before they are processed.
// Can be used for crash-recovery and deterministic replay.
// TODO: currently the wal is overwritten during replay catchup, give it a mode
// so it's either reading or appending - must read to end to start appending
// again.
type BaseWAL struct {
	service.BaseService

	group *auto.Group

	enc *WALEncoder

	flushTicker   *time.Ticker
	flushInterval time.Duration
}

// Compile-time check that BaseWAL satisfies the WAL interface.
var _ WAL = &BaseWAL{}

// NewWAL returns a new write-ahead logger based on `baseWAL`, which implements
// WAL. It's flushed and synced to disk every 2s and once when stopped.
func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) {
	err := tmos.EnsureDir(filepath.Dir(walFile), 0700)
	if err != nil {
		return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err)
	}

	group, err := auto.OpenGroup(walFile, groupOptions...)
	if err != nil {
		return nil, err
	}
	wal := &BaseWAL{
		group:         group,
		enc:           NewWALEncoder(group),
		flushInterval: walDefaultFlushInterval,
	}
	wal.BaseService = *service.NewBaseService(nil, "baseWAL", wal)
	return wal, nil
}

// SetFlushInterval allows us to override the periodic flush interval for the WAL.
func (wal *BaseWAL) SetFlushInterval(i time.Duration) {
	wal.flushInterval = i
}

// Group returns the underlying autofile group the WAL writes to.
func (wal *BaseWAL) Group() *auto.Group {
	return wal.group
}

// SetLogger sets the logger on both the WAL service and its autofile group.
func (wal *BaseWAL) SetLogger(l log.Logger) {
	wal.BaseService.Logger = l
	wal.group.SetLogger(l)
}

// OnStart implements service.Service. An empty WAL file gets an initial
// EndHeightMessage{0} so replay always finds a height marker.
func (wal *BaseWAL) OnStart() error {
	size, err := wal.group.Head.Size()
	if err != nil {
		return err
	} else if size == 0 {
		if err := wal.WriteSync(EndHeightMessage{0}); err != nil {
			return err
		}
	}
	err = wal.group.Start()
	if err != nil {
		return err
	}
	wal.flushTicker = time.NewTicker(wal.flushInterval)
	go wal.processFlushTicks()
	return nil
}

// processFlushTicks periodically flushes/fsyncs the WAL until Quit.
func (wal *BaseWAL) processFlushTicks() {
	for {
		select {
		case <-wal.flushTicker.C:
			if err := wal.FlushAndSync(); err != nil {
				wal.Logger.Error("Periodic WAL flush failed", "err", err)
			}
		case <-wal.Quit():
			return
		}
	}
}

// FlushAndSync flushes and fsync's the underlying group's data to disk.
// See auto#FlushAndSync
func (wal *BaseWAL) FlushAndSync() error {
	return wal.group.FlushAndSync()
}

// Stop the underlying autofile group.
// Use Wait() to ensure it's finished shutting down
// before cleaning up files.
func (wal *BaseWAL) OnStop() {
	wal.flushTicker.Stop()
	if err := wal.FlushAndSync(); err != nil {
		wal.Logger.Error("error on flush data to disk", "error", err)
	}
	if err := wal.group.Stop(); err != nil {
		wal.Logger.Error("error trying to stop wal", "error", err)
	}
	wal.group.Close()
}

// Wait for the underlying autofile group to finish shutting down
// so it's safe to cleanup files.
func (wal *BaseWAL) Wait() {
	wal.group.Wait()
}

// Write is called in newStep and for each receive on the
// peerMsgQueue and the timeoutTicker.
// NOTE: does not call fsync()
func (wal *BaseWAL) Write(msg WALMessage) error {
	if wal == nil {
		return nil
	}

	if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil {
		wal.Logger.Error("Error writing msg to consensus wal. WARNING: recover may not be possible for the current height",
			"err", err, "msg", msg)
		return err
	}

	return nil
}

// WriteSync is called when we receive a msg from ourselves
// so that we write to disk before sending signed messages.
// NOTE: calls fsync()
func (wal *BaseWAL) WriteSync(msg WALMessage) error {
	if wal == nil {
		return nil
	}

	if err := wal.Write(msg); err != nil {
		return err
	}

	if err := wal.FlushAndSync(); err != nil {
		wal.Logger.Error(`WriteSync failed to flush consensus wal.
		WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted`,
			"err", err)
		return err
	}

	return nil
}

// WALSearchOptions are optional arguments to SearchForEndHeight.
type WALSearchOptions struct {
	// IgnoreDataCorruptionErrors set to true will result in skipping data corruption errors.
	IgnoreDataCorruptionErrors bool
}

// SearchForEndHeight searches for the EndHeightMessage with the given height
// and returns an auto.GroupReader, whether it was found and an error.
// Group reader will be nil if found equals false.
//
// CONTRACT: caller must close group reader.
func (wal *BaseWAL) SearchForEndHeight(
	height int64,
	options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
	var (
		msg *TimedWALMessage
		gr  *auto.GroupReader
	)
	lastHeightFound := int64(-1)

	// NOTE: starting from the last file in the group because we're usually
	// searching for the last height. See replay.go
	min, max := wal.group.MinIndex(), wal.group.MaxIndex()
	wal.Logger.Info("Searching for height", "height", height, "min", min, "max", max)
	for index := max; index >= min; index-- {
		gr, err = wal.group.NewReader(index)
		if err != nil {
			return nil, false, err
		}

		dec := NewWALDecoder(gr)
		for {
			msg, err = dec.Decode()
			if err == io.EOF {
				// OPTIMISATION: no need to look for height in older files if we've seen h < height
				if lastHeightFound > 0 && lastHeightFound < height {
					gr.Close()
					return nil, false, nil
				}
				// check next file
				break
			}
			if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
				wal.Logger.Error("Corrupted entry. Skipping...", "err", err)
				// do nothing
				continue
			} else if err != nil {
				gr.Close()
				return nil, false, err
			}

			if m, ok := msg.Msg.(EndHeightMessage); ok {
				lastHeightFound = m.Height
				if m.Height == height { // found
					wal.Logger.Info("Found", "height", height, "index", index)
					return gr, true, nil
				}
			}
		}
		gr.Close()
	}

	return nil, false, nil
}

// /////////////////////////////////////////////////////////////////////////////

// A WALEncoder writes custom-encoded WAL messages to an output stream.
//
// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value
type WALEncoder struct {
	wr io.Writer
}

// NewWALEncoder returns a new encoder that writes to wr.
func NewWALEncoder(wr io.Writer) *WALEncoder {
	return &WALEncoder{wr}
}

// Encode writes the custom encoding of v to the stream. It returns an error if
// the encoded size of v is greater than 1MB. Any error encountered
// during the write is also returned.
func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
	pbMsg, err := WALToProto(v.Msg)
	if err != nil {
		return err
	}
	pv := tmcons.TimedWALMessage{
		Time: v.Time,
		Msg:  pbMsg,
	}

	data, err := proto.Marshal(&pv)
	if err != nil {
		// Marshalling an in-memory message should never fail; treat it as a
		// programmer bug rather than a recoverable error.
		panic(fmt.Errorf("encode timed wall message failure: %w", err))
	}

	// Frame layout: [4-byte big-endian CRC32C][4-byte big-endian length][data].
	crc := crc32.Checksum(data, crc32c)
	length := uint32(len(data))
	if length > maxMsgSizeBytes {
		return fmt.Errorf("msg is too big: %d bytes, max: %d bytes", length, maxMsgSizeBytes)
	}
	totalLength := 8 + int(length)

	msg := make([]byte, totalLength)
	binary.BigEndian.PutUint32(msg[0:4], crc)
	binary.BigEndian.PutUint32(msg[4:8], length)
	copy(msg[8:], data)

	_, err = enc.wr.Write(msg)
	return err
}

// /////////////////////////////////////////////////////////////////////////////

// IsDataCorruptionError returns true if data has been corrupted inside WAL.
func IsDataCorruptionError(err error) bool {
	_, ok := err.(DataCorruptionError)
	return ok
}

// DataCorruptionError is an error that occures if data on disk was corrupted.
type DataCorruptionError struct {
	cause error
}

func (e DataCorruptionError) Error() string {
	return fmt.Sprintf("DataCorruptionError[%v]", e.cause)
}

// Cause returns the underlying error that triggered the corruption report.
func (e DataCorruptionError) Cause() error {
	return e.cause
}

// A WALDecoder reads and decodes custom-encoded WAL messages from an input
// stream. See WALEncoder for the format used.
//
// It will also compare the checksums and make sure data size is equal to the
// length from the header. If that is not the case, error will be returned.
type WALDecoder struct {
	rd io.Reader
}

// NewWALDecoder returns a new decoder that reads from rd.
func NewWALDecoder(rd io.Reader) *WALDecoder {
	return &WALDecoder{rd}
}

// Decode reads the next custom-encoded value from its reader and returns it.
+func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { + b := make([]byte, 4) + + _, err := dec.rd.Read(b) + if errors.Is(err, io.EOF) { + return nil, err + } + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)} + } + crc := binary.BigEndian.Uint32(b) + + b = make([]byte, 4) + _, err = dec.rd.Read(b) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)} + } + length := binary.BigEndian.Uint32(b) + + if length > maxMsgSizeBytes { + return nil, DataCorruptionError{fmt.Errorf( + "length %d exceeded maximum possible value of %d bytes", + length, + maxMsgSizeBytes)} + } + + data := make([]byte, length) + n, err := dec.rd.Read(data) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v (read: %d, wanted: %d)", err, n, length)} + } + + // check checksum before decoding data + actualCRC := crc32.Checksum(data, crc32c) + if actualCRC != crc { + return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)} + } + + var res = new(tmcons.TimedWALMessage) + err = proto.Unmarshal(data, res) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} + } + + walMsg, err := WALFromProto(res.Msg) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to convert from proto: %w", err)} + } + tMsgWal := &TimedWALMessage{ + Time: res.Time, + Msg: walMsg, + } + + return tMsgWal, err +} + +type nilWAL struct{} + +var _ WAL = nilWAL{} + +func (nilWAL) Write(m WALMessage) error { return nil } +func (nilWAL) WriteSync(m WALMessage) error { return nil } +func (nilWAL) FlushAndSync() error { return nil } +func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { + return nil, false, nil +} +func (nilWAL) Start() error { return nil } +func (nilWAL) Stop() error { return nil } +func (nilWAL) Wait() {} diff 
--git a/test/maverick/consensus/wal_fuzz.go b/test/maverick/consensus/wal_fuzz.go new file mode 100644 index 0000000000..e15097c305 --- /dev/null +++ b/test/maverick/consensus/wal_fuzz.go @@ -0,0 +1,31 @@
// +build gofuzz

package consensus

import (
	"bytes"
	"io"
)

// Fuzz is a go-fuzz entry point. It feeds arbitrary bytes to the WAL decoder
// and re-encodes every message that decodes cleanly. Return values follow the
// go-fuzz convention: 1 = interesting input (fully decoded), 0 = invalid input.
func Fuzz(data []byte) int {
	dec := NewWALDecoder(bytes.NewReader(data))
	for {
		msg, err := dec.Decode()
		if err == io.EOF {
			break
		}
		if err != nil {
			// Invariant: Decode must never return a non-nil message together
			// with an error.
			if msg != nil {
				panic("msg != nil on error")
			}
			return 0
		}
		var w bytes.Buffer
		enc := NewWALEncoder(&w)
		err = enc.Encode(msg)
		if err != nil {
			panic(err)
		}
	}
	return 1
}
diff --git a/test/maverick/consensus/wal_generator.go b/test/maverick/consensus/wal_generator.go new file mode 100644 index 0000000000..e2a905c288 --- /dev/null +++ b/test/maverick/consensus/wal_generator.go @@ -0,0 +1,232 @@
package consensus

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"path/filepath"
	"testing"
	"time"

	db "github.com/tendermint/tm-db"

	"github.com/lazyledger/lazyledger-core/abci/example/kvstore"
	cfg "github.com/lazyledger/lazyledger-core/config"
	"github.com/lazyledger/lazyledger-core/libs/log"
	tmrand "github.com/lazyledger/lazyledger-core/libs/rand"
	"github.com/lazyledger/lazyledger-core/privval"
	"github.com/lazyledger/lazyledger-core/proxy"
	sm "github.com/lazyledger/lazyledger-core/state"
	"github.com/lazyledger/lazyledger-core/store"
	"github.com/lazyledger/lazyledger-core/types"
)

// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created.
// If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
	config := getConfig(t)

	app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))

	logger := log.TestingLogger().With("wal_generator", "wal_generator")
	logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks)

	// ///////////////////////////////////////////////////////////////////////////
	// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
	// NOTE: we can't import node package because of circular dependency.
	// NOTE: we don't do handshake so need to set state.Version.Consensus.App directly.
	privValidatorKeyFile := config.PrivValidatorKeyFile()
	privValidatorStateFile := config.PrivValidatorStateFile()
	privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
	if err != nil {
		return err
	}
	genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
	if err != nil {
		return fmt.Errorf("failed to read genesis file: %w", err)
	}
	// Block store and state share a single in-memory DB; nothing is persisted.
	blockStoreDB := db.NewMemDB()
	stateDB := blockStoreDB
	stateStore := sm.NewStore(stateDB)
	state, err := sm.MakeGenesisState(genDoc)
	if err != nil {
		return fmt.Errorf("failed to make genesis state: %w", err)
	}
	state.Version.Consensus.App = kvstore.ProtocolVersion
	if err = stateStore.Save(state); err != nil {
		t.Error(err)
	}

	blockStore := store.NewBlockStore(blockStoreDB)

	proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app))
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
		return fmt.Errorf("failed to start proxy app connections: %w", err)
	}
	t.Cleanup(func() {
		if err := proxyApp.Stop(); err != nil {
			t.Error(err)
		}
	})

	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))
	if err := eventBus.Start(); err != nil {
		return fmt.Errorf("failed to start event bus: %w", err)
	}
	t.Cleanup(func() {
		if err := eventBus.Stop(); err != nil {
			t.Error(err)
		}
	})
	mempool := emptyMempool{}
	evpool := sm.EmptyEvidencePool{}
	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
	consensusState := NewState(config.Consensus, state.Copy(),
		blockExec, blockStore, mempool, evpool, map[int64]Misbehavior{})
	consensusState.SetLogger(logger)
	consensusState.SetEventBus(eventBus)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	// END OF COPY PASTE
	// ///////////////////////////////////////////////////////////////////////////

	// set consensus wal to buffered WAL, which will write all incoming msgs to buffer
	numBlocksWritten := make(chan struct{})
	wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
	// see wal.go#103
	if err := wal.Write(EndHeightMessage{0}); err != nil {
		t.Error(err)
	}

	consensusState.wal = wal

	if err := consensusState.Start(); err != nil {
		return fmt.Errorf("failed to start consensus state: %w", err)
	}

	// Wait until the byteBufferWAL signals that numBlocks end-height markers
	// were written, or give up after a minute.
	select {
	case <-numBlocksWritten:
		if err := consensusState.Stop(); err != nil {
			t.Error(err)
		}
		return nil
	case <-time.After(1 * time.Minute):
		if err := consensusState.Stop(); err != nil {
			t.Error(err)
		}
		return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
	}
}

// WALWithNBlocks returns a WAL content with numBlocks.
func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) {
	var b bytes.Buffer
	wr := bufio.NewWriter(&b)

	if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil {
		return []byte{}, err
	}

	// NOTE(review): Flush error is ignored here; for an in-memory
	// bytes.Buffer it cannot fail, but worth confirming if wr ever changes.
	wr.Flush()
	return b.Bytes(), nil
}

func randPort() int {
	// returns between base and base + spread
	base, spread := 20000, 20000
	return base + tmrand.Intn(spread)
}

// makeAddrs returns three consecutive localhost TCP addresses starting from a
// random port (used for p2p, rpc and grpc listeners respectively).
func makeAddrs() (string, string, string) {
	start := randPort()
	return fmt.Sprintf("tcp://127.0.0.1:%d", start),
		fmt.Sprintf("tcp://127.0.0.1:%d", start+1),
		fmt.Sprintf("tcp://127.0.0.1:%d", start+2)
}

// getConfig returns a config for test cases
func getConfig(t *testing.T) *cfg.Config {
	c := cfg.ResetTestRoot(t.Name())

	// and we use random ports to run in parallel
	tm, rpc, grpc := makeAddrs()
	c.P2P.ListenAddress = tm
	c.RPC.ListenAddress = rpc
	c.RPC.GRPCListenAddress = grpc
	return c
}

// byteBufferWAL is a WAL which writes all msgs to a byte buffer. Writing stops
// when the heightToStop is reached. Client will be notified via
// signalWhenStopsTo channel.
type byteBufferWAL struct {
	enc               *WALEncoder
	stopped           bool
	heightToStop      int64
	signalWhenStopsTo chan<- struct{}

	logger log.Logger
}

// needed for determinism
var fixedTime, _ = time.Parse(time.RFC3339, "2017-01-02T15:04:05Z")

func newByteBufferWAL(logger log.Logger, enc *WALEncoder, nBlocks int64, signalStop chan<- struct{}) *byteBufferWAL {
	return &byteBufferWAL{
		enc:               enc,
		heightToStop:      nBlocks,
		signalWhenStopsTo: signalStop,
		logger:            logger,
	}
}

// Save writes message to the internal buffer except when heightToStop is
// reached, in which case it will signal the caller via signalWhenStopsTo and
// skip writing.
func (w *byteBufferWAL) Write(m WALMessage) error {
	if w.stopped {
		w.logger.Debug("WAL already stopped. Not writing message", "msg", m)
		return nil
	}

	if endMsg, ok := m.(EndHeightMessage); ok {
		w.logger.Debug("WAL write end height message", "height", endMsg.Height, "stopHeight", w.heightToStop)
		if endMsg.Height == w.heightToStop {
			w.logger.Debug("Stopping WAL at height", "height", endMsg.Height)
			w.signalWhenStopsTo <- struct{}{}
			w.stopped = true
			return nil
		}
	}

	w.logger.Debug("WAL Write Message", "msg", m)
	// fixedTime keeps the generated WAL byte-for-byte deterministic.
	err := w.enc.Encode(&TimedWALMessage{fixedTime, m})
	if err != nil {
		panic(fmt.Sprintf("failed to encode the msg %v", m))
	}

	return nil
}

func (w *byteBufferWAL) WriteSync(m WALMessage) error {
	return w.Write(m)
}

func (w *byteBufferWAL) FlushAndSync() error { return nil }

func (w *byteBufferWAL) SearchForEndHeight(
	height int64,
	options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
	return nil, false, nil
}

func (w *byteBufferWAL) Start() error { return nil }
func (w *byteBufferWAL) Stop() error  { return nil }
func (w *byteBufferWAL) Wait()        {}
diff --git a/test/maverick/main.go b/test/maverick/main.go new file mode 100644 index 0000000000..8e8df2c14d --- /dev/null +++ b/test/maverick/main.go @@ -0,0 +1,251 @@
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	cmd "github.com/lazyledger/lazyledger-core/cmd/tendermint/commands"
	"github.com/lazyledger/lazyledger-core/cmd/tendermint/commands/debug"
	cfg "github.com/lazyledger/lazyledger-core/config"
	"github.com/lazyledger/lazyledger-core/libs/cli"
	tmflags "github.com/lazyledger/lazyledger-core/libs/cli/flags"
	"github.com/lazyledger/lazyledger-core/libs/log"
	tmos "github.com/lazyledger/lazyledger-core/libs/os"
	tmrand "github.com/lazyledger/lazyledger-core/libs/rand"
	"github.com/lazyledger/lazyledger-core/p2p"
	tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types"
	cs "github.com/lazyledger/lazyledger-core/test/maverick/consensus"
	nd "github.com/lazyledger/lazyledger-core/test/maverick/node"
	"github.com/lazyledger/lazyledger-core/types"
	tmtime "github.com/lazyledger/lazyledger-core/types/time"
)

var (
	config          = cfg.DefaultConfig()
	logger          = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	misbehaviorFlag = ""
)

func init() {
	registerFlagsRootCmd(RootCmd)
}

func registerFlagsRootCmd(command *cobra.Command) {
	command.PersistentFlags().String("log_level", config.LogLevel, "Log level")
}

// ParseConfig builds a Config from viper-bound flags/files, ensures the root
// directory exists, and validates the result.
func ParseConfig() (*cfg.Config, error) {
	conf := cfg.DefaultConfig()
	err := viper.Unmarshal(conf)
	if err != nil {
		return nil, err
	}
	conf.SetRoot(conf.RootDir)
	cfg.EnsureRoot(conf.RootDir)
	if err = conf.ValidateBasic(); err != nil {
		return nil, fmt.Errorf("error in config file: %v", err)
	}
	return conf, err
}

// RootCmd is the root command for Tendermint core.
var RootCmd = &cobra.Command{
	Use:   "maverick",
	Short: "Tendermint Maverick Node",
	// NOTE(review): "constructured" is a typo in this user-facing string; left
	// as-is here because this is a documentation-only pass.
	Long: "Tendermint Maverick Node for testing with faulty consensus misbehaviors in a testnet. Contains " +
		"all the functionality of a normal node but custom misbehaviors can be injected when running the node " +
		"through a flag. See maverick node --help for how the misbehavior flag is constructured",
	PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
		fmt.Printf("use: %v, args: %v", cmd.Use, cmd.Args)
		config, err = ParseConfig()
		if err != nil {
			return err
		}
		if config.LogFormat == cfg.LogFormatJSON {
			logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
		}
		logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
		if err != nil {
			return err
		}
		if viper.GetBool(cli.TraceFlag) {
			logger = log.NewTracingLogger(logger)
		}
		logger = logger.With("module", "main")
		return nil
	},
}

func main() {
	rootCmd := RootCmd
	rootCmd.AddCommand(
		ListMisbehaviorCmd,
		cmd.GenValidatorCmd,
		InitFilesCmd,
		cmd.ProbeUpnpCmd,
		cmd.ReplayCmd,
		cmd.ReplayConsoleCmd,
		cmd.ResetAllCmd,
		cmd.ResetPrivValidatorCmd,
		cmd.ShowValidatorCmd,
		cmd.ShowNodeIDCmd,
		cmd.GenNodeKeyCmd,
		cmd.VersionCmd,
		debug.DebugCmd,
		cli.NewCompletionCmd(rootCmd, true),
	)

	nodeCmd := &cobra.Command{
		Use:     "start",
		Aliases: []string{"node", "run"},
		Short:   "Run the maverick node",
		RunE: func(command *cobra.Command, args []string) error {
			return startNode(config, logger, misbehaviorFlag)
		},
	}

	cmd.AddNodeFlags(nodeCmd)

	// Create & start node
	rootCmd.AddCommand(nodeCmd)

	// add special flag for misbehaviors
	nodeCmd.Flags().StringVar(
		&misbehaviorFlag,
		"misbehaviors",
		"",
		"Select the misbehaviors of the node (comma-separated, no spaces in between): \n"+
			"e.g. --misbehaviors double-prevote,3\n"+
			"You can also have multiple misbehaviors: e.g. double-prevote,3,no-vote,5")

	cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir)))
	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}

// startNode parses the misbehavior flag, creates and starts a maverick node,
// installs a signal handler for graceful shutdown, and then blocks forever.
func startNode(config *cfg.Config, logger log.Logger, misbehaviorFlag string) error {
	misbehaviors, err := nd.ParseMisbehaviors(misbehaviorFlag)
	if err != nil {
		return err
	}

	node, err := nd.DefaultNewNode(config, logger, misbehaviors)
	if err != nil {
		return fmt.Errorf("failed to create node: %w", err)
	}

	if err := node.Start(); err != nil {
		return fmt.Errorf("failed to start node: %w", err)
	}

	logger.Info("Started node", "nodeInfo", node.Switch().NodeInfo())

	// Stop upon receiving SIGTERM or CTRL-C.
	tmos.TrapSignal(logger, func() {
		if node.IsRunning() {
			if err := node.Stop(); err != nil {
				logger.Error("unable to stop the node", "error", err)
			}
		}
	})

	// Run forever.
	select {}
}

var keyType string

var InitFilesCmd = &cobra.Command{
	Use:   "init",
	Short: "Initialize Tendermint",
	RunE:  initFiles,
}

func init() {
	InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
		"Key type to generate privval file with. Options: ed25519, secp256k1")
}

func initFiles(cmd *cobra.Command, args []string) error {
	return initFilesWithConfig(config)
}

// initFilesWithConfig idempotently creates the private validator key/state
// files, node key, and genesis document under the configured root directory.
func initFilesWithConfig(config *cfg.Config) error {
	// private validator
	privValKeyFile := config.PrivValidatorKeyFile()
	privValStateFile := config.PrivValidatorStateFile()
	var pv *nd.FilePV
	if tmos.FileExists(privValKeyFile) {
		pv = nd.LoadFilePV(privValKeyFile, privValStateFile)
		logger.Info("Found private validator", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
	} else {
		pv = nd.GenFilePV(privValKeyFile, privValStateFile)
		pv.Save()
		logger.Info("Generated private validator", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
	}

	nodeKeyFile := config.NodeKeyFile()
	if tmos.FileExists(nodeKeyFile) {
		logger.Info("Found node key", "path", nodeKeyFile)
	} else {
		if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
			return err
		}
		logger.Info("Generated node key", "path", nodeKeyFile)
	}

	// genesis file
	genFile := config.GenesisFile()
	if tmos.FileExists(genFile) {
		logger.Info("Found genesis file", "path", genFile)
	} else {
		genDoc := types.GenesisDoc{
			ChainID:         fmt.Sprintf("test-chain-%v", tmrand.Str(6)),
			GenesisTime:     tmtime.Now(),
			ConsensusParams: types.DefaultConsensusParams(),
		}
		if keyType == "secp256k1" {
			genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
				PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
			}
		}
		pubKey, err := pv.GetPubKey()
		if err != nil {
			return fmt.Errorf("can't get pubkey: %w", err)
		}
		genDoc.Validators = []types.GenesisValidator{{
			Address: pubKey.Address(),
			PubKey:  pubKey,
			Power:   10,
		}}

		if err := genDoc.SaveAs(genFile); err != nil {
			return err
		}
		logger.Info("Generated genesis file", "path", genFile)
	}

	return nil
}

var ListMisbehaviorCmd = &cobra.Command{
	Use:   "misbehaviors",
	Short: "Lists possible misbehaviors",
	RunE:  listMisbehaviors,
}

// listMisbehaviors prints every misbehavior registered in cs.MisbehaviorList.
func listMisbehaviors(cmd *cobra.Command, args []string) error {
	str := "Currently registered misbehaviors: \n"
	for key := range cs.MisbehaviorList {
		str += fmt.Sprintf("- %s\n", key)
	}
	fmt.Println(str)
	return nil
}
diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go new file mode 100644 index 0000000000..96857ab9bf --- /dev/null +++ b/test/maverick/node/node.go @@ -0,0 +1,1454 @@
package node

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
	"strconv"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rs/cors"

	dbm "github.com/tendermint/tm-db"

	abci "github.com/lazyledger/lazyledger-core/abci/types"
	bcv0 "github.com/lazyledger/lazyledger-core/blockchain/v0"
	bcv2 "github.com/lazyledger/lazyledger-core/blockchain/v2"
	cfg "github.com/lazyledger/lazyledger-core/config"
	"github.com/lazyledger/lazyledger-core/consensus"
	"github.com/lazyledger/lazyledger-core/crypto"
	"github.com/lazyledger/lazyledger-core/evidence"
	tmjson "github.com/lazyledger/lazyledger-core/libs/json"
	"github.com/lazyledger/lazyledger-core/libs/log"
	tmpubsub "github.com/lazyledger/lazyledger-core/libs/pubsub"
	"github.com/lazyledger/lazyledger-core/libs/service"
	"github.com/lazyledger/lazyledger-core/light"
	mempl "github.com/lazyledger/lazyledger-core/mempool"
	"github.com/lazyledger/lazyledger-core/p2p"
	"github.com/lazyledger/lazyledger-core/p2p/pex"
	"github.com/lazyledger/lazyledger-core/privval"
	"github.com/lazyledger/lazyledger-core/proxy"
	rpccore "github.com/lazyledger/lazyledger-core/rpc/core"
	grpccore "github.com/lazyledger/lazyledger-core/rpc/grpc"
	rpcserver "github.com/lazyledger/lazyledger-core/rpc/jsonrpc/server"
	sm "github.com/lazyledger/lazyledger-core/state"
	"github.com/lazyledger/lazyledger-core/state/txindex"
	"github.com/lazyledger/lazyledger-core/state/txindex/kv"
	"github.com/lazyledger/lazyledger-core/state/txindex/null"
	"github.com/lazyledger/lazyledger-core/statesync"
	"github.com/lazyledger/lazyledger-core/store"
	cs "github.com/lazyledger/lazyledger-core/test/maverick/consensus"
	"github.com/lazyledger/lazyledger-core/types"
	tmtime "github.com/lazyledger/lazyledger-core/types/time"
	"github.com/lazyledger/lazyledger-core/version"
)

//------------------------------------------------------------------------------

// ParseMisbehaviors is a util function that converts a comma separated string into
// a map of misbehaviors to be executed by the maverick node
func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) {
	// check if string is empty in which case we run a normal node
	var misbehaviors = make(map[int64]cs.Misbehavior)
	if str == "" {
		return misbehaviors, nil
	}
	// Expected shape: name,height,name,height,... so pairs are required.
	strs := strings.Split(str, ",")
	if len(strs)%2 != 0 {
		return misbehaviors, errors.New("missing either height or misbehavior name in the misbehavior flag")
	}
OUTER_LOOP:
	for i := 0; i < len(strs); i += 2 {
		height, err := strconv.ParseInt(strs[i+1], 10, 64)
		if err != nil {
			return misbehaviors, fmt.Errorf("failed to parse misbehavior height: %w", err)
		}
		for key, misbehavior := range cs.MisbehaviorList {
			if key == strs[i] {
				misbehaviors[height] = misbehavior
				continue OUTER_LOOP
			}
		}
		return misbehaviors, fmt.Errorf("received unknown misbehavior: %s. Did you forget to add it?", strs[i])
	}

	return misbehaviors, nil
}

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string
	Config *cfg.Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	dbType := dbm.BackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}

// Provider takes a config and a logger and returns a ready to go Node.
type Provider func(*cfg.Config, log.Logger) (*Node, error)

// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int64]cs.Misbehavior) (*Node, error) {
	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
	if err != nil {
		return nil, fmt.Errorf("failed to load or gen node key %s, err: %w", config.NodeKeyFile(), err)
	}

	return NewNode(config,
		LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
		misbehaviors,
	)

}

// MetricsProvider returns a consensus, p2p and mempool Metrics.
type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)

// DefaultMetricsProvider returns Metrics build using Prometheus client library
// if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
	return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
		if config.Prometheus {
			return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID),
				p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID),
				mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID),
				sm.PrometheusMetrics(config.Namespace, "chain_id", chainID)
		}
		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
	}
}

// Option sets a parameter for the node.
type Option func(*Node)

// Temporary interface for switching to fast sync, we should get rid of v0.
// See: https://github.com/tendermint/tendermint/issues/4595
type fastSyncReactor interface {
	SwitchToFastSync(sm.State) error
}

// CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to
// the node's Switch.
//
// WARNING: using any name from the below list of the existing reactors will
// result in replacing it with the custom one.
//
// - MEMPOOL
// - BLOCKCHAIN
// - CONSENSUS
// - EVIDENCE
// - PEX
// - STATESYNC
func CustomReactors(reactors map[string]p2p.Reactor) Option {
	return func(n *Node) {
		for name, reactor := range reactors {
			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
				n.sw.Logger.Info("Replacing existing reactor with a custom one",
					"name", name, "existing", existingReactor, "custom", reactor)
				n.sw.RemoveReactor(name, existingReactor)
			}
			n.sw.AddReactor(name, reactor)
		}
	}
}

// CustomReactorsAsConstructors is like CustomReactors but the reactors are
// built lazily from the node, so a constructor can reach node internals.
func CustomReactorsAsConstructors(reactors map[string]func(n *Node) p2p.Reactor) Option {
	return func(n *Node) {
		for name, customReactor := range reactors {
			if existingReactor := n.sw.Reactor(name); existingReactor != nil {
				n.sw.Logger.Info("Replacing existing reactor with a custom one",
					"name", name)
				n.sw.RemoveReactor(name, existingReactor)
			}
			n.sw.AddReactor(name, customReactor(n))
		}
	}
}

// StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and
// build a State object for bootstrapping the node.
// WARNING: this interface is considered unstable and subject to change.
func StateProvider(stateProvider statesync.StateProvider) Option {
	return func(n *Node) {
		n.stateSyncProvider = stateProvider
	}
}

//------------------------------------------------------------------------------

// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	service.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo
	nodeKey     p2p.NodeKey // our node privkey
	isListening bool

	// services
	eventBus          *types.EventBus // pub/sub for services
	stateStore        sm.Store
	blockStore        *store.BlockStore // store the blockchain to disk
	bcReactor         p2p.Reactor       // for fast-syncing
	mempoolReactor    *mempl.Reactor    // for gossipping transactions
	mempool           mempl.Mempool
	stateSync         bool                    // whether the node should state sync on startup
	stateSyncReactor  *statesync.Reactor      // for hosting and restoring state sync snapshots
	stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node
	stateSyncGenesis  sm.State                // provides the genesis state for state sync
	consensusState    *cs.State               // latest consensus state
	consensusReactor  *cs.Reactor             // for participating in the consensus
	pexReactor        *pex.Reactor            // for exchanging peer addresses
	evidencePool      *evidence.Pool          // tracking evidence
	proxyApp          proxy.AppConns          // connection to the application
	rpcListeners      []net.Listener          // rpc servers
	txIndexer         txindex.TxIndexer
	indexerService    *txindex.IndexerService
	prometheusSrv     *http.Server
}

// initDBs opens the blockstore and state databases via the given provider.
func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
	var blockStoreDB dbm.DB
	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
	if err != nil {
		return
	}
	blockStore = store.NewBlockStore(blockStoreDB)

	stateDB, err = dbProvider(&DBContext{"state", config})
	if err != nil {
		return
	}

	return
}

// createAndStartProxyAppConns builds the ABCI app connections and starts them.
func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
	proxyApp := proxy.NewAppConns(clientCreator)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
	}
	return proxyApp, nil
}

// createAndStartEventBus builds and starts the node-wide event bus.
func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))
	if err := eventBus.Start(); err != nil {
		return nil, err
	}
	return eventBus, nil
}

// createAndStartIndexerService wires a tx indexer (kv-backed or null) to the
// event bus and starts the indexing service.
func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
	eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {

	var txIndexer txindex.TxIndexer
	switch config.TxIndex.Indexer {
	case "kv":
		store, err := dbProvider(&DBContext{"tx_index", config})
		if err != nil {
			return nil, nil, err
		}
		txIndexer = kv.NewTxIndex(store)
	default:
		txIndexer = &null.TxIndex{}
	}

	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
	indexerService.SetLogger(logger.With("module", "txindex"))
	if err := indexerService.Start(); err != nil {
		return nil, nil, err
	}
	return indexerService, txIndexer, nil
}

// doHandshake replays blocks against the app so app state matches chain state.
func doHandshake(
	stateStore sm.Store,
	state sm.State,
	blockStore sm.BlockStore,
	genDoc *types.GenesisDoc,
	eventBus types.BlockEventPublisher,
	proxyApp proxy.AppConns,
	consensusLogger log.Logger) error {

	handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
	handshaker.SetLogger(consensusLogger)
	handshaker.SetEventBus(eventBus)
	if err := handshaker.Handshake(proxyApp); err != nil {
		return fmt.Errorf("error during handshake: %v", err)
	}
	return nil
}

func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) {
	// Log the version info.
	logger.Info("Version info",
		"software", version.TMCoreSemVer,
		"block", version.BlockProtocol,
		"p2p", version.P2PProtocol,
	)

	// If the state and software differ in block version, at least log it.
	if state.Version.Consensus.Block != version.BlockProtocol {
		logger.Info("Software and state have different block protocols",
			"software", version.BlockProtocol,
			"state", state.Version.Consensus.Block,
		)
	}

	addr := pubKey.Address()
	// Log whether this node is a validator or an observer
	if state.Validators.HasAddress(addr) {
		consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey)
	} else {
		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
	}
}

// onlyValidatorIsUs reports whether the validator set consists solely of this
// node's key (used to decide whether to wait for peers before consensus).
func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
	if state.Validators.Size() > 1 {
		return false
	}
	addr, _ := state.Validators.GetByIndex(0)
	return bytes.Equal(pubKey.Address(), addr)
}

// createMempoolAndMempoolReactor builds the CList mempool and its reactor,
// enabling the txs-available notification when consensus waits for txs.
func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {

	mempool := mempl.NewCListMempool(
		config.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		mempl.WithPreCheck(sm.TxPreCheck(state)),
		mempl.WithPostCheck(sm.TxPostCheck(state)),
	)
	mempoolLogger := logger.With("module", "mempool")
	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
	mempoolReactor.SetLogger(mempoolLogger)

	if config.Consensus.WaitForTxs() {
		mempool.EnableTxsAvailable()
	}
	return mempoolReactor, mempool
}

// createEvidenceReactor opens the evidence DB and builds the evidence pool and
// its reactor.
func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
	stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) {

	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
	if err != nil {
		return nil, nil, err
	}
	evidenceLogger := logger.With("module", "evidence")
	evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore)
	if err != nil {
		return nil, nil, err
	}
	evidenceReactor := evidence.NewReactor(evidencePool)
	evidenceReactor.SetLogger(evidenceLogger)
	return evidenceReactor, evidencePool, nil
}

// createBlockchainReactor selects the fast-sync reactor implementation (v0 or
// v2) based on config.FastSync.Version.
func createBlockchainReactor(config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore *store.BlockStore,
	fastSync bool,
	logger log.Logger) (bcReactor p2p.Reactor, err error) {

	switch config.FastSync.Version {
	case "v0":
		bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	case "v2":
		bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	default:
		return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	bcReactor.SetLogger(logger.With("module", "blockchain"))
	return bcReactor, nil
}

// createConsensusReactor builds the maverick consensus state (with the given
// misbehaviors map) and its reactor, wiring the event bus.
func createConsensusReactor(config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mempool *mempl.CListMempool,
	evidencePool *evidence.Pool,
	privValidator types.PrivValidator,
	csMetrics *cs.Metrics,
	waitSync bool,
	eventBus *types.EventBus,
	consensusLogger log.Logger,
	misbehaviors map[int64]cs.Misbehavior) (*cs.Reactor, *cs.State) {

	consensusState := cs.NewState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		misbehaviors,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics))
	consensusReactor.SetLogger(consensusLogger)
	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)
	return consensusReactor, consensusState
}

func createTransport(
config *cfg.Config, + nodeInfo p2p.NodeInfo, + nodeKey p2p.NodeKey, + proxyApp proxy.AppConns, +) ( + *p2p.MultiplexTransport, + []p2p.PeerFilterFunc, +) { + var ( + mConnConfig = p2p.MConnConfig(config.P2P) + transport = p2p.NewMultiplexTransport(nodeInfo, nodeKey, mConnConfig) + connFilters = []p2p.ConnFilterFunc{} + peerFilters = []p2p.PeerFilterFunc{} + ) + + if !config.P2P.AllowDuplicateIP { + connFilters = append(connFilters, p2p.ConnDuplicateIPFilter()) + } + + // Filter peers by addr or pubkey with an ABCI query. + // If the query return code is OK, add peer. + if config.FilterPeers { + connFilters = append( + connFilters, + // ABCI query for address filtering. + func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { + res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("error querying abci app: %v", res) + } + + return nil + }, + ) + + peerFilters = append( + peerFilters, + // ABCI query for ID filtering. + func(_ p2p.IPeerSet, p p2p.Peer) error { + res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("error querying abci app: %v", res) + } + + return nil + }, + ) + } + + p2p.MultiplexTransportConnFilters(connFilters...)(transport) + + // Limit the number of incoming connections. 
+ max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + p2p.MultiplexTransportMaxIncomingConnections(max)(transport) + + return transport, peerFilters +} + +func createSwitch(config *cfg.Config, + transport p2p.Transport, + p2pMetrics *p2p.Metrics, + peerFilters []p2p.PeerFilterFunc, + mempoolReactor *mempl.Reactor, + bcReactor p2p.Reactor, + stateSyncReactor *p2p.ReactorShim, + consensusReactor *cs.Reactor, + evidenceReactor *evidence.Reactor, + nodeInfo p2p.NodeInfo, + nodeKey p2p.NodeKey, + p2pLogger log.Logger) *p2p.Switch { + + sw := p2p.NewSwitch( + config.P2P, + transport, + p2p.WithMetrics(p2pMetrics), + p2p.SwitchPeerFilters(peerFilters...), + ) + sw.SetLogger(p2pLogger) + sw.AddReactor("MEMPOOL", mempoolReactor) + sw.AddReactor("BLOCKCHAIN", bcReactor) + sw.AddReactor("CONSENSUS", consensusReactor) + sw.AddReactor("EVIDENCE", evidenceReactor) + sw.AddReactor("STATESYNC", stateSyncReactor) + + sw.SetNodeInfo(nodeInfo) + sw.SetNodeKey(nodeKey) + + p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile()) + return sw +} + +func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, + p2pLogger log.Logger, nodeKey p2p.NodeKey) (pex.AddrBook, error) { + + addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) + addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) + + // Add ourselves to addrbook to prevent dialing ourselves + if config.P2P.ExternalAddress != "" { + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ExternalAddress)) + if err != nil { + return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) + } + addrBook.AddOurAddress(addr) + } + if config.P2P.ListenAddress != "" { + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ListenAddress)) + if err != nil { + return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) + } + addrBook.AddOurAddress(addr) + } 
+ + sw.SetAddrBook(addrBook) + + return addrBook, nil +} + +func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, + sw *p2p.Switch, logger log.Logger) *pex.Reactor { + + // TODO persistent peers ? so we can have their DNS addrs saved + pexReactor := pex.NewReactor(addrBook, + &pex.ReactorConfig{ + Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), + SeedMode: config.P2P.SeedMode, + // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 + // blocks assuming 10s blocks ~ 28 hours. + // TODO (melekes): make it dynamic based on the actual block latencies + // from the live network. + // https://github.com/tendermint/tendermint/issues/3523 + SeedDisconnectWaitPeriod: 28 * time.Hour, + PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, + }) + pexReactor.SetLogger(logger.With("module", "pex")) + sw.AddReactor("PEX", pexReactor) + return pexReactor +} + +// startStateSync starts an asynchronous state sync process, then switches to fast sync mode. 
+func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, + stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, + stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { + ssR.Logger.Info("Starting state sync") + + if stateProvider == nil { + var err error + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + stateProvider, err = statesync.NewLightClientStateProvider( + ctx, + state.ChainID, state.Version, state.InitialHeight, + config.RPCServers, light.TrustOptions{ + Period: config.TrustPeriod, + Height: config.TrustHeight, + Hash: config.TrustHashBytes(), + }, ssR.Logger.With("module", "light")) + if err != nil { + return fmt.Errorf("failed to set up light client state provider: %w", err) + } + } + + go func() { + state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) + if err != nil { + ssR.Logger.Error("State sync failed", "err", err) + return + } + err = stateStore.Bootstrap(state) + if err != nil { + ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) + return + } + err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + ssR.Logger.Error("Failed to store last seen commit", "err", err) + return + } + + if fastSync { + // FIXME Very ugly to have these metrics bleed through here. + conR.Metrics.StateSyncing.Set(0) + conR.Metrics.FastSyncing.Set(1) + err = bcR.SwitchToFastSync(state) + if err != nil { + ssR.Logger.Error("Failed to switch to fast sync", "err", err) + return + } + } else { + conR.SwitchToConsensus(state, true) + } + }() + return nil +} + +// NewNode returns a new, ready to go, Tendermint Node. 
+func NewNode(config *cfg.Config, + privValidator types.PrivValidator, + nodeKey p2p.NodeKey, + clientCreator proxy.ClientCreator, + genesisDocProvider GenesisDocProvider, + dbProvider DBProvider, + metricsProvider MetricsProvider, + logger log.Logger, + misbehaviors map[int64]cs.Misbehavior, + options ...Option) (*Node, error) { + + blockStore, stateDB, err := initDBs(config, dbProvider) + if err != nil { + return nil, err + } + + stateStore := sm.NewStore(stateDB) + + state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) + if err != nil { + return nil, err + } + + // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). + proxyApp, err := createAndStartProxyAppConns(clientCreator, logger) + if err != nil { + return nil, err + } + + // EventBus and IndexerService must be started before the handshake because + // we might need to index the txs of the replayed block as this might not have happened + // when the node stopped last time (i.e. the node stopped after it saved the block + // but before it indexed the txs, or, endblocker panicked) + eventBus, err := createAndStartEventBus(logger) + if err != nil { + return nil, err + } + + // Transaction indexing + indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger) + if err != nil { + return nil, err + } + + // If an address is provided, listen on the socket for a connection from an + // external signing process. 
+ if config.PrivValidatorListenAddr != "" { + // FIXME: we should start services inside OnStart + privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger) + if err != nil { + return nil, fmt.Errorf("error with private validator socket client: %w", err) + } + } + + pubKey, err := privValidator.GetPubKey() + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) + } + + // Determine whether we should do state and/or fast sync. + // We don't fast-sync when the only validator is us. + fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) + stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + if stateSync && state.LastBlockHeight > 0 { + logger.Info("Found local state with non-zero height, skipping state sync") + stateSync = false + } + + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync tendermint with the app. + consensusLogger := logger.With("module", "consensus") + if !stateSync { + if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + return nil, err + } + + // Reload the state. It will have the Version.Consensus.App set by the + // Handshake, and may have other modifications as well (ie. depending on + // what happened during block replay). 
+ state, err = stateStore.Load() + if err != nil { + return nil, fmt.Errorf("cannot load state: %w", err) + } + } + + logNodeStartupInfo(state, pubKey, logger, consensusLogger) + + csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) + + // Make MempoolReactor + mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) + + // Make Evidence Reactor + evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) + if err != nil { + return nil, err + } + + // make block executor for consensus and blockchain reactors to execute blocks + blockExec := sm.NewBlockExecutor( + stateStore, + logger.With("module", "state"), + proxyApp.Consensus(), + mempool, + evidencePool, + sm.BlockExecutorWithMetrics(smMetrics), + ) + + // Make BlockchainReactor. Don't start fast sync if we're doing a state sync first. + bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger) + if err != nil { + return nil, fmt.Errorf("could not create blockchain reactor: %w", err) + } + + // Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first. + // FIXME We need to update metrics here, since other reactors don't have access to them. + if stateSync { + csMetrics.StateSyncing.Set(1) + } else if fastSync { + csMetrics.FastSyncing.Set(1) + } + + logger.Info("Setting up maverick consensus reactor", "Misbehaviors", misbehaviors) + consensusReactor, consensusState := createConsensusReactor( + config, state, blockExec, blockStore, mempool, evidencePool, + privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, misbehaviors) + + // Set up state sync reactor, and schedule a sync if requested. + // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, + // we should clean this whole thing up. 
See: + // https://github.com/tendermint/tendermint/issues/4644 + stateSyncReactorShim := p2p.NewReactorShim("StateSyncShim", statesync.ChannelShims) + stateSyncReactorShim.SetLogger(logger.With("module", "statesync")) + + stateSyncReactor := statesync.NewReactor( + stateSyncReactorShim.Logger, + proxyApp.Snapshot(), + proxyApp.Query(), + stateSyncReactorShim.GetChannel(statesync.SnapshotChannel), + stateSyncReactorShim.GetChannel(statesync.ChunkChannel), + stateSyncReactorShim.PeerUpdates, + config.StateSync.TempDir, + ) + + nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) + if err != nil { + return nil, err + } + + // Setup Transport. + transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) + + // Setup Switch. + p2pLogger := logger.With("module", "p2p") + sw := createSwitch( + config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, + stateSyncReactorShim, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, + ) + + err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) + } + + err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) + } + + addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) + if err != nil { + return nil, fmt.Errorf("could not create addrbook: %w", err) + } + + // Optionally, start the pex reactor + // + // TODO: + // + // We need to set Seeds and PersistentPeers on the switch, + // since it needs to be able to use these (and their DNS names) + // even if the PEX is off. We can include the DNS name in the NetAddress, + // but it would still be nice to have a clear list of the current "PersistentPeers" + // somewhere that we can return with net_info. 
+ // + // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. + // Note we currently use the addrBook regardless at least for AddOurAddress + var pexReactor *pex.Reactor + if config.P2P.PexReactor { + pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) + } + + if config.RPC.PprofListenAddress != "" { + go func() { + logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) + logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) + }() + } + + node := &Node{ + config: config, + genesisDoc: genDoc, + privValidator: privValidator, + + transport: transport, + sw: sw, + addrBook: addrBook, + nodeInfo: nodeInfo, + nodeKey: nodeKey, + + stateStore: stateStore, + blockStore: blockStore, + bcReactor: bcReactor, + mempoolReactor: mempoolReactor, + mempool: mempool, + consensusState: consensusState, + consensusReactor: consensusReactor, + stateSyncReactor: stateSyncReactor, + stateSync: stateSync, + stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state + pexReactor: pexReactor, + evidencePool: evidencePool, + proxyApp: proxyApp, + txIndexer: txIndexer, + indexerService: indexerService, + eventBus: eventBus, + } + node.BaseService = *service.NewBaseService(logger, "Node", node) + + for _, option := range options { + option(node) + } + + return node, nil +} + +// OnStart starts the Node. It implements service.Service. +func (n *Node) OnStart() error { + now := tmtime.Now() + genTime := n.genesisDoc.GenesisTime + if genTime.After(now) { + n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) + time.Sleep(genTime.Sub(now)) + } + + // Add private IDs to addrbook to block those peers being added + n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) + + // Start the RPC server before the P2P server + // so we can eg. 
receive txs for the first block + if n.config.RPC.ListenAddress != "" { + listeners, err := n.startRPC() + if err != nil { + return err + } + n.rpcListeners = listeners + } + + if n.config.Instrumentation.Prometheus && + n.config.Instrumentation.PrometheusListenAddr != "" { + n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) + } + + // Start the transport. + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID, n.config.P2P.ListenAddress)) + if err != nil { + return err + } + if err := n.transport.Listen(*addr); err != nil { + return err + } + + n.isListening = true + + if n.config.Mempool.WalEnabled() { + err = n.mempool.InitWAL() + if err != nil { + return fmt.Errorf("init mempool WAL: %w", err) + } + } + + // Start the switch (the P2P server). + err = n.sw.Start() + if err != nil { + return err + } + + // Start the real state sync reactor separately since the switch uses the shim. + if err := n.stateSyncReactor.Start(); err != nil { + return err + } + + // Always connect to persistent peers + err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return fmt.Errorf("could not dial peers from persistent_peers field: %w", err) + } + + // Run state sync + if n.stateSync { + bcR, ok := n.bcReactor.(fastSyncReactor) + if !ok { + return fmt.Errorf("this blockchain reactor does not support switching from state sync") + } + err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, + n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis) + if err != nil { + return fmt.Errorf("failed to start state sync: %w", err) + } + } + + return nil +} + +// OnStop stops the Node. It implements service.Service. 
+func (n *Node) OnStop() { + n.BaseService.OnStop() + + n.Logger.Info("Stopping Node") + + // first stop the non-reactor services + if err := n.eventBus.Stop(); err != nil { + n.Logger.Error("Error closing eventBus", "err", err) + } + if err := n.indexerService.Stop(); err != nil { + n.Logger.Error("Error closing indexerService", "err", err) + } + + // now stop the reactors + if err := n.sw.Stop(); err != nil { + n.Logger.Error("Error closing switch", "err", err) + } + + // Stop the real state sync reactor separately since the switch uses the shim. + if err := n.stateSyncReactor.Stop(); err != nil { + n.Logger.Error("failed to stop state sync service", "err", err) + } + + // stop mempool WAL + if n.config.Mempool.WalEnabled() { + n.mempool.CloseWAL() + } + + if err := n.transport.Close(); err != nil { + n.Logger.Error("Error closing transport", "err", err) + } + + n.isListening = false + + // finally stop the listeners / external services + for _, l := range n.rpcListeners { + n.Logger.Info("Closing rpc listener", "listener", l) + if err := l.Close(); err != nil { + n.Logger.Error("Error closing listener", "listener", l, "err", err) + } + } + + if pvsc, ok := n.privValidator.(service.Service); ok { + if err := pvsc.Stop(); err != nil { + n.Logger.Error("Error closing private validator", "err", err) + } + } + + if n.prometheusSrv != nil { + if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout: + n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) + } + } +} + +// ConfigureRPC makes sure RPC has all the objects it needs to operate. 
+func (n *Node) ConfigureRPC() error { + pubKey, err := n.privValidator.GetPubKey() + if err != nil { + return fmt.Errorf("can't get pubkey: %w", err) + } + rpccore.SetEnvironment(&rpccore.Environment{ + ProxyAppQuery: n.proxyApp.Query(), + ProxyAppMempool: n.proxyApp.Mempool(), + + StateStore: n.stateStore, + BlockStore: n.blockStore, + EvidencePool: n.evidencePool, + ConsensusState: n.consensusState, + P2PPeers: n.sw, + P2PTransport: n, + + PubKey: pubKey, + GenDoc: n.genesisDoc, + TxIndexer: n.txIndexer, + ConsensusReactor: &consensus.Reactor{}, + EventBus: n.eventBus, + Mempool: n.mempool, + + Logger: n.Logger.With("module", "rpc"), + + Config: *n.config.RPC, + }) + return nil +} + +func (n *Node) startRPC() ([]net.Listener, error) { + err := n.ConfigureRPC() + if err != nil { + return nil, err + } + + listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") + + if n.config.RPC.Unsafe { + rpccore.AddUnsafeRoutes() + } + + config := rpcserver.DefaultConfig() + config.MaxBodyBytes = n.config.RPC.MaxBodyBytes + config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + config.MaxOpenConnections = n.config.RPC.MaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. 
+ // See https://github.com/tendermint/tendermint/issues/3435 + if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + + // we may expose the rpc over both a unix and tcp socket + listeners := make([]net.Listener, len(listenAddrs)) + for i, listenAddr := range listenAddrs { + mux := http.NewServeMux() + rpcLogger := n.Logger.With("module", "rpc-server") + wmLogger := rpcLogger.With("protocol", "websocket") + wm := rpcserver.NewWebsocketManager(rpccore.Routes, + rpcserver.OnDisconnect(func(remoteAddr string) { + err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil && err != tmpubsub.ErrSubscriptionNotFound { + wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) + } + }), + rpcserver.ReadLimit(config.MaxBodyBytes), + ) + wm.SetLogger(wmLogger) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) + listener, err := rpcserver.Listen( + listenAddr, + config, + ) + if err != nil { + return nil, err + } + + var rootHandler http.Handler = mux + if n.config.RPC.IsCorsEnabled() { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: n.config.RPC.CORSAllowedOrigins, + AllowedMethods: n.config.RPC.CORSAllowedMethods, + AllowedHeaders: n.config.RPC.CORSAllowedHeaders, + }) + rootHandler = corsMiddleware.Handler(mux) + } + if n.config.RPC.IsTLSEnabled() { + go func() { + if err := rpcserver.ServeTLS( + listener, + rootHandler, + n.config.RPC.CertFile(), + n.config.RPC.KeyFile(), + rpcLogger, + config, + ); err != nil { + n.Logger.Error("Error serving server with TLS", "err", err) + } + }() + } else { + go func() { + if err := rpcserver.Serve( + listener, + rootHandler, + rpcLogger, + config, + ); err != nil { + n.Logger.Error("Error serving server", "err", err) + } + }() + } + + listeners[i] = listener + } + + // we expose a simplified api over 
grpc for convenience to app devs + grpcListenAddr := n.config.RPC.GRPCListenAddress + if grpcListenAddr != "" { + config := rpcserver.DefaultConfig() + config.MaxBodyBytes = n.config.RPC.MaxBodyBytes + config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + // NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections + config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + listener, err := rpcserver.Listen(grpcListenAddr, config) + if err != nil { + return nil, err + } + go func() { + if err := grpccore.StartGRPCServer(listener); err != nil { + n.Logger.Error("Error starting gRPC server", "err", err) + } + }() + listeners = append(listeners, listener) + } + + return listeners, nil +} + +// startPrometheusServer starts a Prometheus HTTP server, listening for metrics +// collectors on addr. +func (n *Node) startPrometheusServer(addr string) *http.Server { + srv := &http.Server{ + Addr: addr, + Handler: promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, promhttp.HandlerFor( + prometheus.DefaultGatherer, + promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, + ), + ), + } + go func() { + if err := srv.ListenAndServe(); err != http.ErrServerClosed { + // Error starting or closing listener: + n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err) + } + }() + return srv +} + +// Switch returns the Node's Switch. +func (n *Node) Switch() *p2p.Switch { + return n.sw +} + +// BlockStore returns the Node's BlockStore. +func (n *Node) BlockStore() *store.BlockStore { + return n.blockStore +} + +// ConsensusState returns the Node's ConsensusState. 
+func (n *Node) ConsensusState() *cs.State { + return n.consensusState +} + +// ConsensusReactor returns the Node's ConsensusReactor. +func (n *Node) ConsensusReactor() *cs.Reactor { + return n.consensusReactor +} + +// MempoolReactor returns the Node's mempool reactor. +func (n *Node) MempoolReactor() *mempl.Reactor { + return n.mempoolReactor +} + +// Mempool returns the Node's mempool. +func (n *Node) Mempool() mempl.Mempool { + return n.mempool +} + +// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled. +func (n *Node) PEXReactor() *pex.Reactor { + return n.pexReactor +} + +// EvidencePool returns the Node's EvidencePool. +func (n *Node) EvidencePool() *evidence.Pool { + return n.evidencePool +} + +// EventBus returns the Node's EventBus. +func (n *Node) EventBus() *types.EventBus { + return n.eventBus +} + +// PrivValidator returns the Node's PrivValidator. +// XXX: for convenience only! +func (n *Node) PrivValidator() types.PrivValidator { + return n.privValidator +} + +// GenesisDoc returns the Node's GenesisDoc. +func (n *Node) GenesisDoc() *types.GenesisDoc { + return n.genesisDoc +} + +// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. +func (n *Node) ProxyApp() proxy.AppConns { + return n.proxyApp +} + +// Config returns the Node's config. +func (n *Node) Config() *cfg.Config { + return n.config +} + +//------------------------------------------------------------------------------ + +func (n *Node) Listeners() []string { + return []string{ + fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress), + } +} + +func (n *Node) IsListening() bool { + return n.isListening +} + +// NodeInfo returns the Node's Info from the Switch. 
+func (n *Node) NodeInfo() p2p.NodeInfo { + return n.nodeInfo +} + +func makeNodeInfo( + config *cfg.Config, + nodeKey p2p.NodeKey, + txIndexer txindex.TxIndexer, + genDoc *types.GenesisDoc, + state sm.State, +) (p2p.NodeInfo, error) { + txIndexerStatus := "on" + if _, ok := txIndexer.(*null.TxIndex); ok { + txIndexerStatus = "off" + } + + var bcChannel byte + switch config.FastSync.Version { + case "v0": + bcChannel = bcv0.BlockchainChannel + case "v2": + bcChannel = bcv2.BlockchainChannel + default: + return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) + } + + nodeInfo := p2p.DefaultNodeInfo{ + ProtocolVersion: p2p.NewProtocolVersion( + version.P2PProtocol, // global + state.Version.Consensus.Block, + state.Version.Consensus.App, + ), + DefaultNodeID: nodeKey.ID, + Network: genDoc.ChainID, + Version: version.TMCoreSemVer, + Channels: []byte{ + bcChannel, + cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, + mempl.MempoolChannel, + evidence.EvidenceChannel, + byte(statesync.SnapshotChannel), byte(statesync.ChunkChannel), + }, + Moniker: config.Moniker, + Other: p2p.DefaultNodeInfoOther{ + TxIndex: txIndexerStatus, + RPCAddress: config.RPC.ListenAddress, + }, + } + + if config.P2P.PexReactor { + nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) + } + + lAddr := config.P2P.ExternalAddress + + if lAddr == "" { + lAddr = config.P2P.ListenAddress + } + + nodeInfo.ListenAddr = lAddr + + err := nodeInfo.Validate() + return nodeInfo, err +} + +//------------------------------------------------------------------------------ + +var ( + genesisDocKey = []byte("genesisDoc") +) + +// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the +// database, or creates one using the given genesisDocProvider and persists the +// result to the database. On success this also returns the genesis doc loaded +// through the given provider. 
+func LoadStateFromDBOrGenesisDocProvider( + stateDB dbm.DB, + genesisDocProvider GenesisDocProvider, +) (sm.State, *types.GenesisDoc, error) { + // Get genesis doc + genDoc, err := loadGenesisDoc(stateDB) + if err != nil { + genDoc, err = genesisDocProvider() + if err != nil { + return sm.State{}, nil, err + } + // save genesis doc to prevent a certain class of user errors (e.g. when it + // was changed, accidentally or not). Also good for audit trail. + saveGenesisDoc(stateDB, genDoc) + } + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) + if err != nil { + return sm.State{}, nil, err + } + return state, genDoc, nil +} + +// panics if failed to unmarshal bytes +func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { + b, err := db.Get(genesisDocKey) + if err != nil { + panic(err) + } + if len(b) == 0 { + return nil, errors.New("genesis doc not found") + } + var genDoc *types.GenesisDoc + err = tmjson.Unmarshal(b, &genDoc) + if err != nil { + panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b)) + } + return genDoc, nil +} + +// panics if failed to marshal the given genesis document +func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { + b, err := tmjson.Marshal(genDoc) + if err != nil { + panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) + } + if err := db.SetSync(genesisDocKey, b); err != nil { + panic(fmt.Sprintf("Failed to save genesis doc: %v", err)) + } +} + +func createAndStartPrivValidatorSocketClient( + listenAddr, + chainID string, + logger log.Logger, +) (types.PrivValidator, error) { + pve, err := privval.NewSignerListener(listenAddr, logger) + if err != nil { + return nil, fmt.Errorf("failed to start private validator: %w", err) + } + + pvsc, err := privval.NewSignerClient(pve, chainID) + if err != nil { + return nil, fmt.Errorf("failed to start private validator: %w", err) + } + + // try to get a pubkey from private 
validate first time + _, err = pvsc.GetPubKey() + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) + } + + const ( + retries = 50 // 50 * 100ms = 5s total + timeout = 100 * time.Millisecond + ) + pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout) + + return pvscWithRetries, nil +} + +// splitAndTrimEmpty slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. also filter out empty strings, only return non-empty strings. +func splitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} diff --git a/test/maverick/node/privval.go b/test/maverick/node/privval.go new file mode 100644 index 0000000000..88b2642f7b --- /dev/null +++ b/test/maverick/node/privval.go @@ -0,0 +1,358 @@ +package node + +import ( + "errors" + "fmt" + "io/ioutil" + + "github.com/lazyledger/lazyledger-core/crypto" + "github.com/lazyledger/lazyledger-core/crypto/ed25519" + tmbytes "github.com/lazyledger/lazyledger-core/libs/bytes" + tmjson "github.com/lazyledger/lazyledger-core/libs/json" + tmos "github.com/lazyledger/lazyledger-core/libs/os" + "github.com/lazyledger/lazyledger-core/libs/tempfile" + tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" + "github.com/lazyledger/lazyledger-core/types" +) + +// ******************************************************************************************************************* +// +// WARNING: FOR 
TESTING ONLY. DO NOT USE THIS FILE OUTSIDE MAVERICK +// +// ******************************************************************************************************************* + +const ( + stepNone int8 = 0 // Used to distinguish the initial state + stepPropose int8 = 1 + stepPrevote int8 = 2 + stepPrecommit int8 = 3 +) + +// A vote is either stepPrevote or stepPrecommit. +func voteToStep(vote *tmproto.Vote) int8 { + switch vote.Type { + case tmproto.PrevoteType: + return stepPrevote + case tmproto.PrecommitType: + return stepPrecommit + default: + panic(fmt.Sprintf("Unknown vote type: %v", vote.Type)) + } +} + +//------------------------------------------------------------------------------- + +// FilePVKey stores the immutable part of PrivValidator. +type FilePVKey struct { + Address types.Address `json:"address"` + PubKey crypto.PubKey `json:"pub_key"` + PrivKey crypto.PrivKey `json:"priv_key"` + + filePath string +} + +// Save persists the FilePVKey to its filePath. +func (pvKey FilePVKey) Save() { + outFile := pvKey.filePath + if outFile == "" { + panic("cannot save PrivValidator key: filePath not set") + } + + jsonBytes, err := tmjson.MarshalIndent(pvKey, "", " ") + if err != nil { + panic(err) + } + err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + if err != nil { + panic(err) + } + +} + +//------------------------------------------------------------------------------- + +// FilePVLastSignState stores the mutable part of PrivValidator. +type FilePVLastSignState struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + Step int8 `json:"step"` + Signature []byte `json:"signature,omitempty"` + SignBytes tmbytes.HexBytes `json:"signbytes,omitempty"` + + filePath string +} + +// CheckHRS checks the given height, round, step (HRS) against that of the +// FilePVLastSignState. It returns an error if the arguments constitute a regression, +// or if they match but the SignBytes are empty. 
+// The returned boolean indicates whether the last Signature should be reused - +// it returns true if the HRS matches the arguments and the SignBytes are not empty (indicating +// we have already signed for this HRS, and can reuse the existing signature). +// It panics if the HRS matches the arguments, there's a SignBytes, but no Signature. +func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) { + + if lss.Height > height { + return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height) + } + + if lss.Height == height { + if lss.Round > round { + return false, fmt.Errorf("round regression at height %v. Got %v, last round %v", height, round, lss.Round) + } + + if lss.Round == round { + if lss.Step > step { + return false, fmt.Errorf( + "step regression at height %v round %v. Got %v, last step %v", + height, + round, + step, + lss.Step, + ) + } else if lss.Step == step { + if lss.SignBytes != nil { + if lss.Signature == nil { + panic("pv: Signature is nil but SignBytes is not!") + } + return true, nil + } + return false, errors.New("no SignBytes found") + } + } + } + return false, nil +} + +// Save persists the FilePvLastSignState to its filePath. +func (lss *FilePVLastSignState) Save() { + outFile := lss.filePath + if outFile == "" { + panic("cannot save FilePVLastSignState: filePath not set") + } + jsonBytes, err := tmjson.MarshalIndent(lss, "", " ") + if err != nil { + panic(err) + } + err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + if err != nil { + panic(err) + } +} + +//------------------------------------------------------------------------------- + +// FilePV implements PrivValidator using data persisted to disk +// to prevent double signing. +// NOTE: the directories containing pv.Key.filePath and pv.LastSignState.filePath must already exist. 
+// It includes the LastSignature and LastSignBytes so we don't lose the signature +// if the process crashes after signing but before the resulting consensus message is processed. +type FilePV struct { + Key FilePVKey + LastSignState FilePVLastSignState +} + +// GenFilePV generates a new validator with randomly generated private key +// and sets the filePaths, but does not call Save(). +func GenFilePV(keyFilePath, stateFilePath string) *FilePV { + privKey := ed25519.GenPrivKey() + + return &FilePV{ + Key: FilePVKey{ + Address: privKey.PubKey().Address(), + PubKey: privKey.PubKey(), + PrivKey: privKey, + filePath: keyFilePath, + }, + LastSignState: FilePVLastSignState{ + Step: stepNone, + filePath: stateFilePath, + }, + } +} + +// LoadFilePV loads a FilePV from the filePaths. The FilePV handles double +// signing prevention by persisting data to the stateFilePath. If either file path +// does not exist, the program will exit. +func LoadFilePV(keyFilePath, stateFilePath string) *FilePV { + return loadFilePV(keyFilePath, stateFilePath, true) +} + +// LoadFilePVEmptyState loads a FilePV from the given keyFilePath, with an empty LastSignState. +// If the keyFilePath does not exist, the program will exit. +func LoadFilePVEmptyState(keyFilePath, stateFilePath string) *FilePV { + return loadFilePV(keyFilePath, stateFilePath, false) +} + +// If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState. 
+func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { + keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + if err != nil { + tmos.Exit(err.Error()) + } + pvKey := FilePVKey{} + err = tmjson.Unmarshal(keyJSONBytes, &pvKey) + if err != nil { + tmos.Exit(fmt.Sprintf("Error reading PrivValidator key from %v: %v\n", keyFilePath, err)) + } + + // overwrite pubkey and address for convenience + pvKey.PubKey = pvKey.PrivKey.PubKey() + pvKey.Address = pvKey.PubKey.Address() + pvKey.filePath = keyFilePath + + pvState := FilePVLastSignState{} + + if loadState { + stateJSONBytes, err := ioutil.ReadFile(stateFilePath) + if err != nil { + tmos.Exit(err.Error()) + } + err = tmjson.Unmarshal(stateJSONBytes, &pvState) + if err != nil { + tmos.Exit(fmt.Sprintf("Error reading PrivValidator state from %v: %v\n", stateFilePath, err)) + } + } + + pvState.filePath = stateFilePath + + return &FilePV{ + Key: pvKey, + LastSignState: pvState, + } +} + +// LoadOrGenFilePV loads a FilePV from the given filePaths +// or else generates a new one and saves it to the filePaths. +func LoadOrGenFilePV(keyFilePath, stateFilePath string) *FilePV { + var pv *FilePV + if tmos.FileExists(keyFilePath) { + pv = LoadFilePV(keyFilePath, stateFilePath) + } else { + pv = GenFilePV(keyFilePath, stateFilePath) + pv.Save() + } + return pv +} + +// GetAddress returns the address of the validator. +// Implements PrivValidator. +func (pv *FilePV) GetAddress() types.Address { + return pv.Key.Address +} + +// GetPubKey returns the public key of the validator. +// Implements PrivValidator. +func (pv *FilePV) GetPubKey() (crypto.PubKey, error) { + return pv.Key.PubKey, nil +} + +// SignVote signs a canonical representation of the vote, along with the +// chainID. Implements PrivValidator. 
+func (pv *FilePV) SignVote(chainID string, vote *tmproto.Vote) error { + if err := pv.signVote(chainID, vote); err != nil { + return fmt.Errorf("error signing vote: %v", err) + } + return nil +} + +// SignProposal signs a canonical representation of the proposal, along with +// the chainID. Implements PrivValidator. +func (pv *FilePV) SignProposal(chainID string, proposal *tmproto.Proposal) error { + if err := pv.signProposal(chainID, proposal); err != nil { + return fmt.Errorf("error signing proposal: %v", err) + } + return nil +} + +// Save persists the FilePV to disk. +func (pv *FilePV) Save() { + pv.Key.Save() + pv.LastSignState.Save() +} + +// Reset resets all fields in the FilePV. +// NOTE: Unsafe! +func (pv *FilePV) Reset() { + var sig []byte + pv.LastSignState.Height = 0 + pv.LastSignState.Round = 0 + pv.LastSignState.Step = 0 + pv.LastSignState.Signature = sig + pv.LastSignState.SignBytes = nil + pv.Save() +} + +// String returns a string representation of the FilePV. +func (pv *FilePV) String() string { + return fmt.Sprintf( + "PrivValidator{%v LH:%v, LR:%v, LS:%v}", + pv.GetAddress(), + pv.LastSignState.Height, + pv.LastSignState.Round, + pv.LastSignState.Step, + ) +} + +//------------------------------------------------------------------------------------ + +// signVote checks if the vote is good to sign and sets the vote signature. +// It may need to set the timestamp as well if the vote is otherwise the same as +// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL). +func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error { + height, round, step := vote.Height, vote.Round, voteToStep(vote) + + lss := pv.LastSignState + + _, err := lss.CheckHRS(height, round, step) + if err != nil { + return err + } + + signBytes := types.VoteSignBytes(chainID, vote) + + // It passed the checks. 
Sign the vote + sig, err := pv.Key.PrivKey.Sign(signBytes) + if err != nil { + return err + } + pv.saveSigned(height, round, step, signBytes, sig) + vote.Signature = sig + return nil +} + +// signProposal checks if the proposal is good to sign and sets the proposal signature. +// It may need to set the timestamp as well if the proposal is otherwise the same as +// a previously signed proposal (ie. we crashed after signing but before the proposal hit the WAL). +func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error { + height, round, step := proposal.Height, proposal.Round, stepPropose + + lss := pv.LastSignState + + _, err := lss.CheckHRS(height, round, step) + if err != nil { + return err + } + + signBytes := types.ProposalSignBytes(chainID, proposal) + + // It passed the checks. Sign the proposal + sig, err := pv.Key.PrivKey.Sign(signBytes) + if err != nil { + return err + } + pv.saveSigned(height, round, step, signBytes, sig) + proposal.Signature = sig + return nil +} + +// Persist height/round/step and signature +func (pv *FilePV) saveSigned(height int64, round int32, step int8, + signBytes []byte, sig []byte) { + + pv.LastSignState.Height = height + pv.LastSignState.Round = round + pv.LastSignState.Step = step + pv.LastSignState.Signature = sig + pv.LastSignState.SignBytes = signBytes + pv.LastSignState.Save() +} diff --git a/tools.mk b/tools/Makefile similarity index 70% rename from tools.mk rename to tools/Makefile index 6e1a61ca0f..fa9081ce20 100644 --- a/tools.mk +++ b/tools/Makefile @@ -43,6 +43,11 @@ TOOLS_DESTDIR ?= $(GOPATH)/bin CERTSTRAP = $(TOOLS_DESTDIR)/certstrap PROTOBUF = $(TOOLS_DESTDIR)/protoc GOODMAN = $(TOOLS_DESTDIR)/goodman +BUF_VERSION = "0.30.0" +BINARY_NAME = "buf" +BIN = "/usr/local/bin" +OS = $(shell uname -s) +ARCH = $(shell uname -m) all: tools .PHONY: all @@ -71,6 +76,14 @@ $(PROTOBUF): @go get github.com/gogo/protobuf/protoc-gen-gogofaster@v1.3.1 .PHONY: protobuf +buf: + @echo "Install Buf" + curl 
-sSL \ + "https://github.com/bufbuild/buf/releases/download/v$(BUF_VERSION)/$(BINARY_NAME)-$(OS)-$(ARCH)" \ + -o "${BIN}/${BINARY_NAME}" && \ + chmod +x "${BIN}/${BINARY_NAME}" +.PHONY: buf + goodman: $(GOODMAN) $(GOODMAN): @echo "Get Goodman" @@ -80,32 +93,5 @@ $(GOODMAN): tools-clean: rm -f $(CERTSTRAP) $(PROTOBUF) $(GOX) $(GOODMAN) rm -f tools-stamp - rm -rf /usr/local/include/google/protobuf - rm -f /usr/local/bin/protoc + rm -f "${BIN}/${BINARY_NAME}" .PHONY: tooks-clean - -### -# Non Go tools -### - -# Choose protobuf binary based on OS (only works for 64bit Linux and Mac). -# NOTE: On Mac, installation via brew (brew install protoc) might be favorable. -PROTOC_ZIP="" -ifneq ($(OS),Windows_NT) - UNAME_S := $(shell uname -s) - ifeq ($(UNAME_S),Linux) - PROTOC_ZIP="protoc-3.10.1-linux-x86_64.zip" - endif - ifeq ($(UNAME_S),Darwin) - PROTOC_ZIP="protoc-3.10.1-osx-x86_64.zip" - endif -endif - -protoc: - @echo "Get Protobuf" - @echo "In case of any errors, please install directly from https://github.com/protocolbuffers/protobuf/releases" - @curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/$(PROTOC_ZIP) - @unzip -o $(PROTOC_ZIP) -d /usr/local bin/protoc - @unzip -o $(PROTOC_ZIP) -d /usr/local 'include/*' - @rm -f $(PROTOC_ZIP) -.PHONY: protoc diff --git a/tools/tm-signer-harness/Makefile b/tools/tm-signer-harness/Makefile index 47cd036502..1c404ebf81 100644 --- a/tools/tm-signer-harness/Makefile +++ b/tools/tm-signer-harness/Makefile @@ -2,7 +2,8 @@ TENDERMINT_VERSION?=latest BUILD_TAGS?='tendermint' -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" +VERSION := $(shell git describe --always) +BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)" .DEFAULT_GOAL := build diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go index cdc0c7c0ae..a12eb6bdb8 100644 --- 
a/tools/tm-signer-harness/main.go +++ b/tools/tm-signer-harness/main.go @@ -181,7 +181,7 @@ func main() { } extractKey(flagTMHome, flagKeyOutputPath) case "version": - fmt.Println(version.Version) + fmt.Println(version.TMCoreSemVer) default: fmt.Printf("Unrecognized command: %s\n", flag.Arg(0)) os.Exit(1) diff --git a/types/block.go b/types/block.go index c3e21856ba..3a3f290e81 100644 --- a/types/block.go +++ b/types/block.go @@ -30,6 +30,8 @@ import ( const ( // MaxHeaderBytes is a maximum header size. + // NOTE: Because app hash can be of arbitrary size, the header is therefore not + // capped in size and thus this number should be seen as a soft max MaxHeaderBytes int64 = 626 // MaxOverheadForBlock - maximum overhead to encode a block (up to @@ -465,6 +467,30 @@ func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { return maxDataBytes } +// MakeBlock returns a new block with an empty header, except what can be +// computed from itself. +// It populates the same set of fields validated by ValidateBasic. +func MakeBlock( + height int64, + txs []Tx, evidence []Evidence, intermediateStateRoots []tmbytes.HexBytes, messages Messages, + lastCommit *Commit) *Block { + block := &Block{ + Header: Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 0}, + Height: height, + }, + Data: Data{ + Txs: txs, + IntermediateStateRoots: IntermediateStateRoots{RawRootsList: intermediateStateRoots}, + Evidence: EvidenceData{Evidence: evidence}, + Messages: messages, + }, + LastCommit: lastCommit, + } + block.fillHeader() + return block +} + //----------------------------------------------------------------------------- // Header defines the structure of a Tendermint block header. @@ -737,9 +763,9 @@ const ( const ( // Max size of commit without any commitSigs -> 82 for BlockID, 8 for Height, 4 for Round. 
MaxCommitOverheadBytes int64 = 94 - // Commit sig size is made up of 32 bytes for the signature, 20 bytes for the address, + // Commit sig size is made up of 64 bytes for the signature, 20 bytes for the address, // 1 byte for the flag and 14 bytes for the timestamp - MaxCommitSigBytes int64 = 77 + MaxCommitSigBytes int64 = 109 ) // CommitSig is a part of the Vote included in a Commit. @@ -1314,6 +1340,7 @@ func (data *Data) ToProto() tmproto.Data { // TODO(ismail): handle evidence here instead of the block // for the sake of consistency + return *tp } @@ -1409,12 +1436,12 @@ func (data *EvidenceData) StringIndented(indent string) string { } // ToProto converts EvidenceData to protobuf -func (data *EvidenceData) ToProto() (*tmproto.EvidenceData, error) { +func (data *EvidenceData) ToProto() (*tmproto.EvidenceList, error) { if data == nil { return nil, errors.New("nil evidence data") } - evi := new(tmproto.EvidenceData) + evi := new(tmproto.EvidenceList) eviBzs := make([]tmproto.Evidence, len(data.Evidence)) for i := range data.Evidence { protoEvi, err := EvidenceToProto(data.Evidence[i]) @@ -1429,7 +1456,7 @@ func (data *EvidenceData) ToProto() (*tmproto.EvidenceData, error) { } // FromProto sets a protobuf EvidenceData to the given pointer. 
-func (data *EvidenceData) FromProto(eviData *tmproto.EvidenceData) error { +func (data *EvidenceData) FromProto(eviData *tmproto.EvidenceList) error { if eviData == nil { return errors.New("nil evidenceData") } @@ -1479,7 +1506,7 @@ func (data *EvidenceData) splitIntoShares(shareSize int) NamespacedShares { // BlockID type BlockID struct { Hash tmbytes.HexBytes `json:"hash"` - PartSetHeader PartSetHeader `json:"parts"` + PartSetHeader PartSetHeader `json:"part_set_header"` } // Equals returns true if the BlockID matches the given BlockID diff --git a/types/block_test.go b/types/block_test.go index 62526f0a7b..9f42cf4a62 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -268,33 +268,22 @@ func TestCommitValidateBasic(t *testing.T) { } } -func TestMaxCommitSigBytes(t *testing.T) { +func TestMaxCommitBytes(t *testing.T) { // time is varint encoded so need to pick the max. // year int, month Month, day, hour, min, sec, nsec int, loc *Location timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) - cs := &CommitSig{ - BlockIDFlag: BlockIDFlagNil, - ValidatorAddress: crypto.AddressHash([]byte("validator_address")), - Timestamp: timestamp, - Signature: tmhash.Sum([]byte("signature")), - } - - pb := cs.ToProto() - - assert.EqualValues(t, MaxCommitSigBytes, pb.Size()) -} - -func TestMaxCommitBytes(t *testing.T) { - timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) - cs := CommitSig{ BlockIDFlag: BlockIDFlagNil, ValidatorAddress: crypto.AddressHash([]byte("validator_address")), Timestamp: timestamp, - Signature: tmhash.Sum([]byte("signature")), + Signature: crypto.CRandBytes(MaxSignatureSize), } + pbSig := cs.ToProto() + // test that a single commit sig doesn't exceed max commit sig bytes + assert.EqualValues(t, MaxCommitSigBytes, pbSig.Size()) + // check size with a single commit commit := &Commit{ Height: math.MaxInt64, @@ -474,9 +463,11 @@ func TestBlockMaxDataBytes(t *testing.T) { }{ 0: {-10, 1, 0, true, 
0}, 1: {10, 1, 0, true, 0}, - 2: {809, 1, 0, true, 0}, - 3: {810, 1, 0, false, 0}, - 4: {811, 1, 0, false, 1}, + 2: {841, 1, 0, true, 0}, + 3: {842, 1, 0, false, 0}, + 4: {843, 1, 0, false, 1}, + 5: {954, 2, 0, false, 1}, + 6: {1053, 2, 100, false, 0}, } for i, tc := range testCases { @@ -503,9 +494,9 @@ func TestBlockMaxDataBytesNoEvidence(t *testing.T) { }{ 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, - 2: {809, 1, true, 0}, - 3: {810, 1, false, 0}, - 4: {811, 1, false, 1}, + 2: {841, 1, true, 0}, + 3: {842, 1, false, 0}, + 4: {843, 1, false, 1}, } for i, tc := range testCases { @@ -718,13 +709,8 @@ func TestDataProtoBuf(t *testing.T) { // TestEvidenceDataProtoBuf ensures parity in converting to and from proto. func TestEvidenceDataProtoBuf(t *testing.T) { - val := NewMockPV() - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) const chainID = "mychain" - v := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, blockID, time.Now()) - v2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 2, 0x01, blockID2, time.Now()) - ev := NewDuplicateVoteEvidence(v2, v) + ev := NewMockDuplicateVoteEvidence(math.MaxInt64, time.Now(), chainID) data := &EvidenceData{Evidence: EvidenceList{ev}} _ = data.ByteSize() testCases := []struct { @@ -879,3 +865,444 @@ func TestBlockIDEquals(t *testing.T) { assert.True(t, blockIDEmpty.Equals(blockIDEmpty)) assert.False(t, blockIDEmpty.Equals(blockIDDifferent)) } + +func TestCommitSig_ValidateBasic(t *testing.T) { + testCases := []struct { + name string + cs CommitSig + expectErr bool + errString string + }{ + { + "invalid ID flag", + CommitSig{BlockIDFlag: BlockIDFlag(0xFF)}, + true, "unknown BlockIDFlag", + }, + { + "BlockIDFlagAbsent validator address present", + CommitSig{BlockIDFlag: BlockIDFlagAbsent, ValidatorAddress: crypto.Address("testaddr")}, + true, 
"validator address is present", + }, + { + "BlockIDFlagAbsent timestamp present", + CommitSig{BlockIDFlag: BlockIDFlagAbsent, Timestamp: time.Now().UTC()}, + true, "time is present", + }, + { + "BlockIDFlagAbsent signatures present", + CommitSig{BlockIDFlag: BlockIDFlagAbsent, Signature: []byte{0xAA}}, + true, "signature is present", + }, + { + "BlockIDFlagAbsent valid BlockIDFlagAbsent", + CommitSig{BlockIDFlag: BlockIDFlagAbsent}, + false, "", + }, + { + "non-BlockIDFlagAbsent invalid validator address", + CommitSig{BlockIDFlag: BlockIDFlagCommit, ValidatorAddress: make([]byte, 1)}, + true, "expected ValidatorAddress size", + }, + { + "non-BlockIDFlagAbsent invalid signature (zero)", + CommitSig{ + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, 0), + }, + true, "signature is missing", + }, + { + "non-BlockIDFlagAbsent invalid signature (too large)", + CommitSig{ + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize+1), + }, + true, "signature is too big", + }, + { + "non-BlockIDFlagAbsent valid", + CommitSig{ + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize), + }, + false, "", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + err := tc.cs.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestHeader_ValidateBasic(t *testing.T) { + testCases := []struct { + name string + header Header + expectErr bool + errString string + }{ + { + "invalid version block", + Header{Version: tmversion.Consensus{Block: version.BlockProtocol + 1}}, + true, "block protocol is incorrect", + }, + { + "invalid chain ID length", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + 
ChainID: string(make([]byte, MaxChainIDLen+1)), + }, + true, "chainID is too long", + }, + { + "invalid height (negative)", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: -1, + }, + true, "negative Height", + }, + { + "invalid height (zero)", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 0, + }, + true, "zero Height", + }, + { + "invalid block ID hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size+1), + }, + }, + true, "wrong Hash", + }, + { + "invalid block ID parts header hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size+1), + }, + }, + }, + true, "wrong PartSetHeader", + }, + { + "invalid last commit hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size+1), + }, + true, "wrong LastCommitHash", + }, + { + "invalid data hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size+1), + }, + true, "wrong DataHash", + }, + { + "invalid evidence hash", + Header{ + Version: tmversion.Consensus{Block: 
version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size+1), + }, + true, "wrong EvidenceHash", + }, + { + "invalid proposer address", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize+1), + }, + true, "invalid ProposerAddress length", + }, + { + "invalid validator hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size+1), + }, + true, "wrong ValidatorsHash", + }, + { + "invalid next validator hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + 
ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size+1), + }, + true, "wrong NextValidatorsHash", + }, + { + "invalid consensus hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size), + ConsensusHash: make([]byte, tmhash.Size+1), + }, + true, "wrong ConsensusHash", + }, + { + "invalid last results hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size), + ConsensusHash: make([]byte, tmhash.Size), + LastResultsHash: make([]byte, tmhash.Size+1), + }, + true, "wrong LastResultsHash", + }, + { + "valid header", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + 
ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size), + ConsensusHash: make([]byte, tmhash.Size), + LastResultsHash: make([]byte, tmhash.Size), + }, + false, "", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + err := tc.header.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestCommit_ValidateBasic(t *testing.T) { + testCases := []struct { + name string + commit *Commit + expectErr bool + errString string + }{ + { + "invalid height", + &Commit{Height: -1}, + true, "negative Height", + }, + { + "invalid round", + &Commit{Height: 1, Round: -1}, + true, "negative Round", + }, + { + "invalid block ID", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{}, + }, + true, "commit cannot be for nil block", + }, + { + "no signatures", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + }, + true, "no signatures in commit", + }, + { + "invalid signature", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + Signatures: []CommitSig{ + { + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize+1), + }, + }, + }, + true, "wrong CommitSig", + }, + { + "valid commit", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + Signatures: []CommitSig{ + { + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize), + }, + }, + }, + false, "", + }, + } + + for _, tc := range testCases { + tc 
:= tc + + t.Run(tc.name, func(t *testing.T) { + err := tc.commit.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/types/evidence.go b/types/evidence.go index 194b663d07..f59c6282bb 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -5,9 +5,11 @@ import ( "encoding/binary" "errors" "fmt" + "sort" "strings" "time" + abci "github.com/lazyledger/lazyledger-core/abci/types" "github.com/lazyledger/lazyledger-core/crypto/merkle" "github.com/lazyledger/lazyledger-core/crypto/tmhash" tmjson "github.com/lazyledger/lazyledger-core/libs/json" @@ -18,31 +20,42 @@ import ( // Evidence represents any provable malicious activity by a validator. // Verification logic for each evidence is part of the evidence module. type Evidence interface { - Height() int64 // height of the infraction - Bytes() []byte // bytes which comprise the evidence - Hash() []byte // hash of the evidence - ValidateBasic() error // basic consistency check - String() string // string format of the evidence + ABCI() []abci.Evidence // forms individual evidence to be sent to the application + Bytes() []byte // bytes which comprise the evidence + Hash() []byte // hash of the evidence + Height() int64 // height of the infraction + String() string // string format of the evidence + Time() time.Time // time of the infraction + ValidateBasic() error // basic consistency check } //-------------------------------------------------------------------------------------- -// DuplicateVoteEvidence contains evidence a validator signed two conflicting -// votes. +// DuplicateVoteEvidence contains evidence of a single validator signing two conflicting votes. 
type DuplicateVoteEvidence struct { VoteA *Vote `json:"vote_a"` VoteB *Vote `json:"vote_b"` + + // abci specific information + TotalVotingPower int64 + ValidatorPower int64 + Timestamp time.Time } var _ Evidence = &DuplicateVoteEvidence{} // NewDuplicateVoteEvidence creates DuplicateVoteEvidence with right ordering given // two conflicting votes. If one of the votes is nil, evidence returned is nil as well -func NewDuplicateVoteEvidence(vote1, vote2 *Vote) *DuplicateVoteEvidence { +func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *ValidatorSet) *DuplicateVoteEvidence { var voteA, voteB *Vote - if vote1 == nil || vote2 == nil { + if vote1 == nil || vote2 == nil || valSet == nil { + return nil + } + idx, val := valSet.GetByAddress(vote1.ValidatorAddress) + if idx == -1 { return nil } + if strings.Compare(vote1.BlockID.Key(), vote2.BlockID.Key()) == -1 { voteA = vote1 voteB = vote2 @@ -51,19 +64,26 @@ func NewDuplicateVoteEvidence(vote1, vote2 *Vote) *DuplicateVoteEvidence { voteB = vote1 } return &DuplicateVoteEvidence{ - VoteA: voteA, - VoteB: voteB, + VoteA: voteA, + VoteB: voteB, + TotalVotingPower: valSet.TotalVotingPower(), + ValidatorPower: val.VotingPower, + Timestamp: blockTime, } } -// String returns a string representation of the evidence. -func (dve *DuplicateVoteEvidence) String() string { - return fmt.Sprintf("DuplicateVoteEvidence{VoteA: %v, VoteB: %v}", dve.VoteA, dve.VoteB) -} - -// Height returns the height this evidence refers to. 
-func (dve *DuplicateVoteEvidence) Height() int64 { - return dve.VoteA.Height +// ABCI returns the application relevant representation of the evidence +func (dve *DuplicateVoteEvidence) ABCI() []abci.Evidence { + return []abci.Evidence{{ + Type: abci.EvidenceType_DUPLICATE_VOTE, + Validator: abci.Validator{ + Address: dve.VoteA.ValidatorAddress, + Power: dve.ValidatorPower, + }, + Height: dve.VoteA.Height, + Time: dve.Timestamp, + TotalVotingPower: dve.TotalVotingPower, + }} } // Bytes returns the proto-encoded evidence as a byte array. @@ -82,6 +102,21 @@ func (dve *DuplicateVoteEvidence) Hash() []byte { return tmhash.Sum(dve.Bytes()) } +// Height returns the height of the infraction +func (dve *DuplicateVoteEvidence) Height() int64 { + return dve.VoteA.Height +} + +// String returns a string representation of the evidence. +func (dve *DuplicateVoteEvidence) String() string { + return fmt.Sprintf("DuplicateVoteEvidence{VoteA: %v, VoteB: %v}", dve.VoteA, dve.VoteB) +} + +// Time returns the time of the infraction +func (dve *DuplicateVoteEvidence) Time() time.Time { + return dve.Timestamp +} + // ValidateBasic performs basic validation. 
func (dve *DuplicateVoteEvidence) ValidateBasic() error { if dve == nil { @@ -109,8 +144,11 @@ func (dve *DuplicateVoteEvidence) ToProto() *tmproto.DuplicateVoteEvidence { voteB := dve.VoteB.ToProto() voteA := dve.VoteA.ToProto() tp := tmproto.DuplicateVoteEvidence{ - VoteA: voteA, - VoteB: voteB, + VoteA: voteA, + VoteB: voteB, + TotalVotingPower: dve.TotalVotingPower, + ValidatorPower: dve.ValidatorPower, + Timestamp: dve.Timestamp, } return &tp } @@ -131,7 +169,13 @@ func DuplicateVoteEvidenceFromProto(pb *tmproto.DuplicateVoteEvidence) (*Duplica return nil, err } - dve := NewDuplicateVoteEvidence(vA, vB) + dve := &DuplicateVoteEvidence{ + VoteA: vA, + VoteB: vB, + TotalVotingPower: pb.TotalVotingPower, + ValidatorPower: pb.ValidatorPower, + Timestamp: pb.Timestamp, + } return dve, dve.ValidateBasic() } @@ -146,15 +190,28 @@ func DuplicateVoteEvidenceFromProto(pb *tmproto.DuplicateVoteEvidence) (*Duplica type LightClientAttackEvidence struct { ConflictingBlock *LightBlock CommonHeight int64 + + // abci specific information + ByzantineValidators []*Validator // validators in the validator set that misbehaved in creating the conflicting block + TotalVotingPower int64 // total voting power of the validator set at the common height + Timestamp time.Time // timestamp of the block at the common height } var _ Evidence = &LightClientAttackEvidence{} -// Height returns the last height at which the primary provider and witness provider had the same header. 
-// We use this as the height of the infraction rather than the actual conflicting header because we know -// that the malicious validators were bonded at this height which is important for evidence expiry -func (l *LightClientAttackEvidence) Height() int64 { - return l.CommonHeight +// ABCI forms an array of abci evidence for each byzantine validator +func (l *LightClientAttackEvidence) ABCI() []abci.Evidence { + abciEv := make([]abci.Evidence, len(l.ByzantineValidators)) + for idx, val := range l.ByzantineValidators { + abciEv[idx] = abci.Evidence{ + Type: abci.EvidenceType_LIGHT_CLIENT_ATTACK, + Validator: TM2PB.Validator(val), + Height: l.Height(), + Time: l.Timestamp, + TotalVotingPower: l.TotalVotingPower, + } + } + return abciEv } // Bytes returns the proto-encoded evidence as a byte array @@ -170,10 +227,75 @@ func (l *LightClientAttackEvidence) Bytes() []byte { return bz } -// Hash returns the hash of the header and the commonHeight. This is designed to cause hash collisions with evidence -// that have the same conflicting header and common height but different permutations of validator commit signatures. -// The reason for this is that we don't want to allow several permutations of the same evidence to be committed on -// chain. Ideally we commit the header with the most commit signatures but anything greater than 1/3 is sufficient. +// GetByzantineValidators finds out what style of attack LightClientAttackEvidence was and then works out who +// the malicious validators were and returns them. This is used both for forming the ByzantineValidators +// field and for validating that it is correct. Validators are ordered based on validator power +func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *ValidatorSet, + trusted *SignedHeader) []*Validator { + var validators []*Validator + // First check if the header is invalid. 
This means that it is a lunatic attack and therefore we take the + validators who are in the commonVals and voted for the lunatic header + if l.ConflictingHeaderIsInvalid(trusted.Header) { + for _, commitSig := range l.ConflictingBlock.Commit.Signatures { + if !commitSig.ForBlock() { + continue + } + + _, val := commonVals.GetByAddress(commitSig.ValidatorAddress) + if val == nil { + // validator wasn't in the common validator set + continue + } + validators = append(validators, val) + } + sort.Sort(ValidatorsByVotingPower(validators)) + return validators + } else if trusted.Commit.Round == l.ConflictingBlock.Commit.Round { + // This is an equivocation attack as both commits are in the same round. We then find the validators + // from the conflicting light block validator set that voted in both headers. + // Validator hashes are the same therefore the indexing order of validators is the same and thus we + // only need a single loop to find the validators that voted twice. + for i := 0; i < len(l.ConflictingBlock.Commit.Signatures); i++ { + sigA := l.ConflictingBlock.Commit.Signatures[i] + if sigA.Absent() { + continue + } + + sigB := trusted.Commit.Signatures[i] + if sigB.Absent() { + continue + } + + _, val := l.ConflictingBlock.ValidatorSet.GetByAddress(sigA.ValidatorAddress) + validators = append(validators, val) + } + sort.Sort(ValidatorsByVotingPower(validators)) + return validators + } + // if the rounds are different then this is an amnesia attack. Unfortunately, given the nature of the attack, + // we aren't able yet to deduce which are malicious validators and which are not hence we return an + // empty validator set. + return validators +} + +// ConflictingHeaderIsInvalid takes a trusted header and matches it against a conflicting header +// to determine whether the conflicting header was the product of a valid state transition +// or not. If it is then all the deterministic fields of the header should be the same. 
+// If not, it is an invalid header and constitutes a lunatic attack. +func (l *LightClientAttackEvidence) ConflictingHeaderIsInvalid(trustedHeader *Header) bool { + return !bytes.Equal(trustedHeader.ValidatorsHash, l.ConflictingBlock.ValidatorsHash) || + !bytes.Equal(trustedHeader.NextValidatorsHash, l.ConflictingBlock.NextValidatorsHash) || + !bytes.Equal(trustedHeader.ConsensusHash, l.ConflictingBlock.ConsensusHash) || + !bytes.Equal(trustedHeader.AppHash, l.ConflictingBlock.AppHash) || + !bytes.Equal(trustedHeader.LastResultsHash, l.ConflictingBlock.LastResultsHash) + +} + +// Hash returns the hash of the header and the commonHeight. This is designed to cause hash collisions +// with evidence that have the same conflicting header and common height but different permutations +// of validator commit signatures. The reason for this is that we don't want to allow several +// permutations of the same evidence to be committed on chain. Ideally we commit the header with the +// most commit signatures (captures the most byzantine validators) but anything greater than 1/3 is sufficient. func (l *LightClientAttackEvidence) Hash() []byte { buf := make([]byte, binary.MaxVarintLen64) n := binary.PutVarint(buf, l.CommonHeight) @@ -183,6 +305,24 @@ func (l *LightClientAttackEvidence) Hash() []byte { return tmhash.Sum(bz) } +// Height returns the last height at which the primary provider and witness provider had the same header. 
+// We use this as the height of the infraction rather than the actual conflicting header because we know +// that the malicious validators were bonded at this height which is important for evidence expiry +func (l *LightClientAttackEvidence) Height() int64 { + return l.CommonHeight +} + +// String returns a string representation of LightClientAttackEvidence +func (l *LightClientAttackEvidence) String() string { + return fmt.Sprintf("LightClientAttackEvidence{ConflictingBlock: %v, CommonHeight: %d}", + l.ConflictingBlock.String(), l.CommonHeight) +} + +// Time returns the time of the common block where the infraction leveraged off. +func (l *LightClientAttackEvidence) Time() time.Time { + return l.Timestamp +} + // ValidateBasic performs basic validation such that the evidence is consistent and can now be used for verification. func (l *LightClientAttackEvidence) ValidateBasic() error { if l.ConflictingBlock == nil { @@ -213,12 +353,6 @@ func (l *LightClientAttackEvidence) ValidateBasic() error { return nil } -// String returns a string representation of LightClientAttackEvidence -func (l *LightClientAttackEvidence) String() string { - return fmt.Sprintf("LightClientAttackEvidence{ConflictingBlock: %v, CommonHeight: %d}", - l.ConflictingBlock.String(), l.CommonHeight) -} - // ToProto encodes LightClientAttackEvidence to protobuf func (l *LightClientAttackEvidence) ToProto() (*tmproto.LightClientAttackEvidence, error) { conflictingBlock, err := l.ConflictingBlock.ToProto() @@ -226,29 +360,53 @@ func (l *LightClientAttackEvidence) ToProto() (*tmproto.LightClientAttackEvidenc return nil, err } + byzVals := make([]*tmproto.Validator, len(l.ByzantineValidators)) + for idx, val := range l.ByzantineValidators { + valpb, err := val.ToProto() + if err != nil { + return nil, err + } + byzVals[idx] = valpb + } + return &tmproto.LightClientAttackEvidence{ - ConflictingBlock: conflictingBlock, - CommonHeight: l.CommonHeight, + ConflictingBlock: conflictingBlock, + CommonHeight: 
l.CommonHeight, + ByzantineValidators: byzVals, + TotalVotingPower: l.TotalVotingPower, + Timestamp: l.Timestamp, }, nil } // LightClientAttackEvidenceFromProto decodes protobuf -func LightClientAttackEvidenceFromProto(l *tmproto.LightClientAttackEvidence) (*LightClientAttackEvidence, error) { - if l == nil { +func LightClientAttackEvidenceFromProto(lpb *tmproto.LightClientAttackEvidence) (*LightClientAttackEvidence, error) { + if lpb == nil { return nil, errors.New("empty light client attack evidence") } - conflictingBlock, err := LightBlockFromProto(l.ConflictingBlock) + conflictingBlock, err := LightBlockFromProto(lpb.ConflictingBlock) if err != nil { return nil, err } - le := &LightClientAttackEvidence{ - ConflictingBlock: conflictingBlock, - CommonHeight: l.CommonHeight, + byzVals := make([]*Validator, len(lpb.ByzantineValidators)) + for idx, valpb := range lpb.ByzantineValidators { + val, err := ValidatorFromProto(valpb) + if err != nil { + return nil, err + } + byzVals[idx] = val + } + + l := &LightClientAttackEvidence{ + ConflictingBlock: conflictingBlock, + CommonHeight: lpb.CommonHeight, + ByzantineValidators: byzVals, + TotalVotingPower: lpb.TotalVotingPower, + Timestamp: lpb.Timestamp, } - return le, le.ValidateBasic() + return l, l.ValidateBasic() } //------------------------------------------------------------------------------------------ @@ -386,9 +544,11 @@ func NewMockDuplicateVoteEvidence(height int64, time time.Time, chainID string) return NewMockDuplicateVoteEvidenceWithValidator(height, time, val, chainID) } +// assumes voting power to be 10 and validator to be the only one in the set func NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, pv PrivValidator, chainID string) *DuplicateVoteEvidence { pubKey, _ := pv.GetPubKey() + val := NewValidator(pubKey, 10) voteA := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) vA := voteA.ToProto() _ = pv.SignVote(chainID, vA) @@ -397,7 +557,7 @@ func 
NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, vB := voteB.ToProto() _ = pv.SignVote(chainID, vB) voteB.Signature = vB.Signature - return NewDuplicateVoteEvidence(voteA, voteB) + return NewDuplicateVoteEvidence(voteA, voteB, time, NewValidatorSet([]*Validator{val})) } func makeMockVote(height int64, round, index int32, addr Address, diff --git a/types/evidence_test.go b/types/evidence_test.go index 8cc130f58b..dd3fccb65d 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -33,8 +33,11 @@ func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" return &DuplicateVoteEvidence{ - VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultVoteTime), - VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, defaultVoteTime.Add(1*time.Minute)), + VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultVoteTime), + VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, defaultVoteTime.Add(1*time.Minute)), + TotalVotingPower: 30, + ValidatorPower: 10, + Timestamp: defaultVoteTime, } } @@ -78,7 +81,8 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { vote1 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID, defaultVoteTime) vote2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID2, defaultVoteTime) - ev := NewDuplicateVoteEvidence(vote1, vote2) + valSet := NewValidatorSet([]*Validator{val.ExtractIntoValidator(10)}) + ev := NewDuplicateVoteEvidence(vote1, vote2, defaultVoteTime, valSet) tc.malleateEvidence(ev) assert.Equal(t, tc.expectErr, ev.ValidateBasic() != nil, "Validate Basic had an unexpected result") }) diff --git a/types/genesis.go b/types/genesis.go index 11d18d7eb2..ae54dc9249 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -11,7 +11,6 @@ import ( 
"github.com/lazyledger/lazyledger-core/crypto" tmbytes "github.com/lazyledger/lazyledger-core/libs/bytes" tmjson "github.com/lazyledger/lazyledger-core/libs/json" - tmos "github.com/lazyledger/lazyledger-core/libs/os" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" tmtime "github.com/lazyledger/lazyledger-core/types/time" ) @@ -52,7 +51,8 @@ func (genDoc *GenesisDoc) SaveAs(file string) error { if err != nil { return err } - return tmos.WriteFile(file, genDocBytes, 0644) + + return ioutil.WriteFile(file, genDocBytes, 0644) // nolint:gosec } // ValidatorHash returns the hash of the validator set contained in the GenesisDoc diff --git a/types/protobuf.go b/types/protobuf.go index 6904e6ed6e..4d5fcb65a9 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -1,9 +1,6 @@ package types import ( - "fmt" - "reflect" - abci "github.com/lazyledger/lazyledger-core/abci/types" "github.com/lazyledger/lazyledger-core/crypto" "github.com/lazyledger/lazyledger-core/crypto/ed25519" @@ -111,29 +108,6 @@ func (tm2pb) ConsensusParams(params *tmproto.ConsensusParams) *abci.ConsensusPar } } -// ABCI Evidence includes information from the past that's not included in the evidence itself -// so Evidence types stays compact. 
-// XXX: panics on nil or unknown pubkey type -func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet) abci.Evidence { - - // set type - var evType abci.EvidenceType - switch ev.(type) { - case *DuplicateVoteEvidence: - evType = abci.EvidenceType_DUPLICATE_VOTE - case *LightClientAttackEvidence: - evType = abci.EvidenceType_LIGHT_CLIENT_ATTACK - default: - panic(fmt.Sprintf("unknown evidence type: %v %v", ev, reflect.TypeOf(ev))) - } - - return abci.Evidence{ - Type: evType, - Height: ev.Height(), - TotalVotingPower: valSet.TotalVotingPower(), - } -} - // XXX: panics on nil or unknown pubkey type func (tm2pb) NewValidatorUpdate(pubkey crypto.PubKey, power int64) abci.ValidatorUpdate { pubkeyABCI, err := cryptoenc.PubKeyToProto(pubkey) diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 90ff507faa..ba252f220d 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -60,26 +60,6 @@ func TestABCIConsensusParams(t *testing.T) { assert.Equal(t, *cp, cp2) } -func TestABCIEvidence(t *testing.T) { - val := NewMockPV() - blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) - blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) - const chainID = "mychain" - pubKey, err := val.GetPubKey() - require.NoError(t, err) - ev := &DuplicateVoteEvidence{ - VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultVoteTime), - VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, defaultVoteTime), - } - abciEv := TM2PB.Evidence( - ev, - NewValidatorSet([]*Validator{NewValidator(pubKey, 10)}), - ) - - assert.Equal(t, abci.EvidenceType_DUPLICATE_VOTE, abciEv.Type) - assert.Equal(t, ev.Height(), abciEv.GetHeight()) -} - type pubKeyEddie struct{} func (pubKeyEddie) Address() Address { return []byte{} } diff --git a/types/test_util.go b/types/test_util.go index d563730608..584659a10e 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -4,10 +4,7 @@ import ( "fmt" "time" - tmbytes 
"github.com/lazyledger/lazyledger-core/libs/bytes" tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types" - tmversion "github.com/lazyledger/lazyledger-core/proto/tendermint/version" - "github.com/lazyledger/lazyledger-core/version" ) func MakeCommit(blockID BlockID, height int64, round int32, @@ -81,29 +78,3 @@ func MakeVote( return vote, nil } - -// MakeBlock returns a new block with an empty header, except what can be -// computed from itself. -// It populates the same set of fields validated by ValidateBasic. -// TODO(ismail): tell the IG team that this method isn't only used in tests -// hence, test_util.go is quite misleading. -func MakeBlock( - height int64, - txs []Tx, evidence []Evidence, intermediateStateRoots []tmbytes.HexBytes, messages Messages, - lastCommit *Commit) *Block { - block := &Block{ - Header: Header{ - Version: tmversion.Consensus{Block: version.BlockProtocol, App: 0}, - Height: height, - }, - Data: Data{ - Txs: txs, - IntermediateStateRoots: IntermediateStateRoots{RawRootsList: intermediateStateRoots}, - Evidence: EvidenceData{Evidence: evidence}, - Messages: messages, - }, - LastCommit: lastCommit, - } - block.fillHeader() - return block -} diff --git a/types/validator_set.go b/types/validator_set.go index 070865118b..b58bf4f0a8 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -788,11 +788,11 @@ func (vals *ValidatorSet) VerifyCommitLightTrusting(chainID string, commit *Comm ) // Safely calculate voting power needed. - totalVotingPowerMulByNumerator, overflow := safeMul(vals.TotalVotingPower(), trustLevel.Numerator) + totalVotingPowerMulByNumerator, overflow := safeMul(vals.TotalVotingPower(), int64(trustLevel.Numerator)) if overflow { return errors.New("int64 overflow while calculating voting power needed. 
please provide smaller trustLevel numerator") } - votingPowerNeeded := totalVotingPowerMulByNumerator / trustLevel.Denominator + votingPowerNeeded := totalVotingPowerMulByNumerator / int64(trustLevel.Denominator) for idx, commitSig := range commit.Signatures { // No need to verify absent or nil votes. diff --git a/version/version.go b/version/version.go index e87ceba78a..5082d73c19 100644 --- a/version/version.go +++ b/version/version.go @@ -1,27 +1,12 @@ package version var ( - // GitCommit is the current HEAD set using ldflags. - GitCommit string - - // Version is the built softwares version. - Version = TMCoreSemVer -) - -func init() { - if GitCommit != "" { - Version += "-" + GitCommit - } -} - -const ( // TMCoreSemVer is the current version of Tendermint Core. // It's the Semantic Version of the software. - // Must be a string because scripts like dist.sh read this file. - // XXX: Don't change the name of this variable or you will break - // automation :) - TMCoreSemVer = "0.34.0" + TMCoreSemVer string +) +const ( // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.17.0"