diff --git a/.clang-format b/.clang-format index 968d9f6ac31..f7664c9d7b4 100644 --- a/.clang-format +++ b/.clang-format @@ -8,3 +8,4 @@ DerivePointerAlignment: false PointerAlignment: Right AllowShortFunctionsOnASingleLine: Empty AllowShortIfStatementsOnASingleLine: false + diff --git a/.clang-format-ignore b/.clang-format-ignore new file mode 100644 index 00000000000..31cb9e05bde --- /dev/null +++ b/.clang-format-ignore @@ -0,0 +1,2 @@ +CMakeLists.txt +*.cmake diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e9d18c35c4e..a5c800f3c8d 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -14,6 +14,13 @@ +### Issue + + + + + ### Benefits diff --git a/.github/README.md b/.github/README.md new file mode 100644 index 00000000000..100b4aee2e7 --- /dev/null +++ b/.github/README.md @@ -0,0 +1,31 @@ +GitHub Actions +============== + +GitHub Workflow description in YAML does not support anchors. +There are several workarounds, but they all come down to generating the workflow YAML from a source file. +So I suggest yet another one, `make-workflows.sh`, based on the YAML tool `yq`. + +### USAGE +0. Move your workflows to `.github/*.src.yml` +1. Put `make-workflows.sh` to directory `.github/` +2. (optional) Copy or link `pre-commit-hook.sh` to `.git/hooks/pre-commit` + Like `ln -s ../../.github/pre-commit-hook.sh .git/hooks/pre-commit` + +### Using pre-commit +```yaml +repos: +- repo: local + hooks: + - id: make-workflows + name: Make GitHub workflows from *.src.yml + entry: bash -c '.github/make-workflows.sh && git add .github/workflows' + language: system + types: [yaml] + pass_filenames: false +``` + +### Links +1. https://stackoverflow.com/questions/67368724/share-same-steps-for-different-github-actions-jobs +2. https://github.community/t/support-for-yaml-anchors/16128/60 +3. https://github.com/mithro/actions-includes +4. 
https://github.com/allejo/gha-workflows diff --git a/.github/build-iroha1.src.yml b/.github/build-iroha1.src.yml new file mode 100644 index 00000000000..6fd77dd9239 --- /dev/null +++ b/.github/build-iroha1.src.yml @@ -0,0 +1,806 @@ +name: Iroha1 + +## TODO 1. [vcpkg] build only Debug or only Release - reduce vcpkg build duration and output size 2 times +## see directory triplets/, `vcpkg help triplets` and link: https://stackoverflow.com/a/52781832/3743145 +## TODO 2. [vcpkg] Do not rebuild vcpkg-tool every time [takes about 1min], see build_iroha_deps.sh +## TODO 3. [vcpkg] Use binarycaching on CI https://devblogs.microsoft.com/cppblog/vcpkg-accelerate-your-team-development-environment-with-binary-caching-and-manifests/ +## TODO 4. [speed] better caching utilizing diff-backup style tools like restic and rclone +## to improve performance and reduce storage consumption. +## Store ccache with rclone, maybe store vcpkg binarycache with rclone +## The problem/pitfall is to get the access token during a build from a fork +## TODO 5. [speed] Self-hosted MacOS and Windows +## Need a more powerful Mac machine to reduce build time from 40min to 10min with a hot vcpkg cache, +## and from 2hrs to 27min without cache +## GitHub's default runners also idle a long time before starting when builds run often +## TODO 6. 
[speed,optimization,resources] Cancel previous runs if their duration is less than 10 minutes, protect almost done builds from being killed +## TODO [prettify,documentation] update status badges in README.md +## TODO windows +## FIXME checkout issue, see https://github.com/actions/runner/issues/434 +## TODO actions/create-release for main branch and tags + +## CHEAT SHEET +## check if PR head repo is fork: ${{ github.event.pull_request.head.repo.fork }} +## check if PR is from other repo: ${{ github.event.pull_request.head.repo == github.event.pull_request.base.repo }} in this case secrets are empty +## ternary: ${{ fromJSON('["no", "yes"]')[github.ref != 'refs/heads/master'] }} + +## TODO make these different workflows - reduce number of conditionals inside jobs like 'step_detect_commented_pr' +on: + push: + branches: [ main, support/1.*, edge, develop, test-ci, gha, gha/*, gha-*, \*-with-gha ] + tags: [ 'v*' ] + pull_request: + branches: [ main, support/1.*, edge, develop ] ## target branches + workflow_dispatch: + ## NOTE: Able to run via cmdline: gh workflow run Iroha1 + inputs: + build_spec: + description: 'See chatops-gen-matrix.sh, example "/build ubuntu macos gcc-9 burrow"' + required: false + default: '/build' + issue_comment: + types: [created, edited] + schedule: + - cron: '12 22 * * *' + +jobs: + ## GitHub Actions Workflow does not support yaml anchors + ## and that is why there is a workaround with make-workflows.sh + ## You should `pre-commit install` or use `pre-commit-hook.sh`, + ## anyway please read .github/README.md + check_workflow_yaml_coressponds_to_src_yaml: + runs-on: ubuntu-latest + name: Check if github workflows were properly made from sources + steps: + - &step_detect_commented_pr + name: REF and SHA of commented PR to ENV + if: github.event.comment + run: > + curl -fsSL ${{github.event.issue.pull_request.url}} + -H "Authorization: token ${{github.token}}" | + jq -r ' + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + 
"PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - &step_checkout + name: Checkout + uses: actions/checkout@v2 + with: &step_checkout_with + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - + run: sudo snap install yq + - + name: Check if .github/workflows/*.yml correspond to *.src.yml + run: | + set -x + [[ $(./.github/make-workflows.sh -x --worktree) = *"everything is up to date" ]] + + pr_comment_reaction_rocket: + ## Just to react to valid comment with rocket + runs-on: ubuntu-latest + if: ${{ github.event.comment && + github.event.issue.pull_request && + startsWith(github.event.comment.body, '/build') }} + steps: + - &step_show_context + name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - + name: Reaction + run: | + # send reaction to comment to show build was triggered + curl ${{github.event.comment.url}}/reactions \ + -X POST \ + -d '{"content":"rocket"}' \ + -H "Accept: application/vnd.github.squirrel-girl-preview+json" \ + -H "Authorization: token ${{github.token}}" + + ## This job is to generate build matrixes for build jobs + ## The matrixes depend on what is requeted to be build + ## At the moment there are several options: + ## - default on pushes, pull requests + ## - on comment to pull request according to comment message (chat-ops) + ## - TODO on workflow_dispatch according to its build_spec + ## - TODO all on schedule + generate_matrixes: + runs-on: ubuntu-latest + if: ${{ (github.event_name != 'comment') || ( github.event.comment && + github.event.issue.pull_request && + startsWith(github.event.comment.body, '/build') ) }} + # needs: 
check_workflow_yaml_coressponds_to_src_yaml + steps: + - *step_show_context + - *step_checkout + - + name: Generate matrix for build triggered by chat-ops - comment to PR + if: github.event.comment && github.event.issue.pull_request + id: comment_body + run: echo "${{github.event.comment.body}}" >/tmp/comment_body + - + name: Generate default matrix for regular builds + if: ${{ steps.comment_body.outcome == 'skipped' }} + run: | + set -x + commit_message_body_build_spec(){ + git fetch origin ${{github.event.after}} --depth=1 + git log --format=%B -n1 ${{github.event.after}} | grep '^/build ' + } + case ${{github.event_name}} in + pull_request) commit_message_body_build_spec >/tmp/comment_body || + echo >/tmp/comment_body "/build debug" ;; + push) commit_message_body_build_spec >/tmp/comment_body || { + echo "/build ubuntu debug release" + echo "/build macos debug" + echo "/build windows debug" + } >/tmp/comment_body ;; + schedule) echo >/tmp/comment_body "/build all" ;; + workflow_dispatch) echo >/tmp/comment_body "${{github.event.inputs.build_spec}}" ;; + *) echo >&2 "::error::Unexpected event"; false ;; + esac + - + name: Generate matrixes + id: matrixes + run: | + set -x + cat /tmp/comment_body | .github/chatops-gen-matrix.sh + echo "::set-output name=matrix_ubuntu::$(cat matrix_ubuntu)" + echo "::set-output name=matrix_ubuntu_release::$(cat matrix_ubuntu_release)" + echo "::set-output name=matrix_ubuntu_debug::$(cat matrix_ubuntu_debug)" + echo "::set-output name=matrix_macos::$(cat matrix_macos)" + echo "::set-output name=matrix_windows::$(cat matrix_windows)" + ##TODO report errors and warnings as answer as issue comment (chat-ops) + - + name: Reaction confused + if: failure() && github.event.comment + run: | + # send reaction to comment to show build was triggered + curl ${{github.event.comment.url}}/reactions \ + -X POST \ + -d '{"content":"confused"}' \ + -H "Accept: application/vnd.github.squirrel-girl-preview+json" \ + -H "Authorization: token 
${{github.token}}" + - + name: Reaction rocket + if: github.event.comment + run: | + # send reaction to comment to show build was triggered + curl ${{github.event.comment.url}}/reactions \ + -X POST \ + -d '{"content":"rocket"}' \ + -H "Accept: application/vnd.github.squirrel-girl-preview+json" \ + -H "Authorization: token ${{github.token}}" + outputs: + matrix_ubuntu: ${{steps.matrixes.outputs.matrix_ubuntu}} + matrix_ubuntu_release: ${{steps.matrixes.outputs.matrix_ubuntu_release}} + matrix_ubuntu_debug: ${{steps.matrixes.outputs.matrix_ubuntu_debug}} + matrix_macos: ${{steps.matrixes.outputs.matrix_macos}} + matrix_windows: ${{steps.matrixes.outputs.matrix_windows}} + + ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies + ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, + ## and conditional tags :edge (for development branches) and :latest (for git-tags) + ## Note: image is push only when DockerHub login-token pair available - not to PRs from forks + Docker-iroha-builder: + needs: check_workflow_yaml_coressponds_to_src_yaml + runs-on: ubuntu-latest #[ self-hosted, Linux ] + env: &env_dockerhub + DOCKERHUB_ORG: hyperledger ## Must be hyperledger, also can use iroha1, cannot use ${{ secrets.DOCKERHUB_ORG }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + steps: + - *step_show_context + - &step_system_info + name: System info + run: | + set -x + whoami + id $(whoami) + free || vm_stat | perl -ne '/page size of (\d+)/ and $size=$1; + /Pages\s+([^:]+)[^\d]+(\d+)/ and printf("%-16s % 16.2f Mi\n", "$1:", $2 * $size / 1048576);' + df -h + - &step_build_info + name: Build info + run: | + cat << 'END' + ref:${{github.ref}} + sha:${{github.sha}} + run_number:${{github.run_number}} + event_name:${{github.event_name}} + event.action:${{github.event.action}} + event.issue.number:${{ github.event.issue.number }} 
+ END + - *step_detect_commented_pr + - *step_checkout + - &step_docker_tag + name: Determine dockertag + id: dockertag + env: + dockertag: ${{ hashFiles('docker/iroha-builder/**') }} + run: | + echo "::set-output name=dockertag::$dockertag" + echo >>$GITHUB_ENV dockertag=$dockertag + test -n "$DOCKERHUB_ORG" || { + echo ::error::"DOCKERHUB_ORG must contain value" + false + } + - &step_docker_login + name: Login to DockerHub + if: ${{ env.DOCKERHUB_TOKEN != '' && env.DOCKERHUB_USERNAME != '' }} + id: docker_login + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - &step_warn_docker_no_push + name: Possible WARNING + if: ${{ steps.docker_login.outcome == 'skipped' }} + run: echo "::warning::DOCKERHUB_TOKEN and DOCKERHUB_USERNAME are empty. Will build but NOT push." + - &step_docker_meta + name: Docker meta + id: meta + uses: docker/metadata-action@v3 + with: &step_docker_meta_with + images: ${{ env.DOCKERHUB_ORG }}/iroha-builder + tags: | + type=raw,value=${{env.dockertag}} + type=ref,event=branch + type=ref,event=pr + type=ref,event=tag + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=schedule + type=edge,branch=support/1.2.x + type=edge,branch=develop + type=edge,branch=test-ci + type=sha,prefix=commit-,format=short + type=sha,prefix=commit-,format=long + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed + ## - tag 'edge' when branch support/1.2.x is pushed + ## - schedule, see the docs + - &step_docker_buildx + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - &step_docker_cache + name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{env.dockertag}} + restore-keys: ${{ 
runner.os }}-buildx- + - &step_docker_build_and_push + id: build_and_push + name: Build and push + uses: docker/build-push-action@v2 + with: &step_docker_build_and_push_with + context: docker/iroha-builder/ + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new + push: ${{ steps.docker_login.outcome == 'success' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + - &step_docker_move_cache + # Temp fix + # https://github.com/docker/build-push-action/issues/252 + # https://github.com/moby/buildkit/issues/1896 + name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + - + name: Check if dockertaghash exists in remote registry + id: dockertag_already + run: | + exists=$( curl -fL https://hub.docker.com/v2/repositories/$DOCKERHUB_ORG/iroha-builder/tags | + jq 'any( .results[]|.name == "${{env.dockertag}}" ; .)' ) + echo "::set-output name=exists::$exists" + + if test $exists = true ;then + tag=$dockertag + else + tag=edge + fi + container="$DOCKERHUB_ORG/iroha-builder:$tag" + echo "::set-output name=container::$container" + echo "::set-output name=container_tag::$tag" + outputs: + ## WARN secret dropped from output!, output may not contain secret, + ## and secret cannot be used in job:container directly, and there is no github non-secret variables... + ## if dockertag is already pushed then use it. But let it be empty when tag does not exist remotely. 
+ dockertag: ${{steps.dockertag.outputs.dockertag}} + _dockertag: :${{steps.dockertag.outputs.dockertag}} + pushed: ${{ steps.docker_login.outcome == 'success' && steps.build_and_push.outcome == 'success' }} + dockertag_already_exists: ${{steps.dockertag_already.outputs.exists}} + container: ${{steps.dockertag_already.outputs.container}} + container_tag: ${{steps.dockertag_already.outputs.container_tag}} + + ## Build iroha in a container made of the image earlier prepared + ## Result artifacts are + ## - stand-alone irohad (linked statically) + ## - iroha.deb (with irohad inside) + build-UD: &job_ubuntu + needs: + - Docker-iroha-builder + - generate_matrixes + runs-on: [ self-hosted, Linux ] + ## Container is taken from previous job + container: #&container + image: &container_image ${{needs.Docker-iroha-builder.outputs.container}} + options: --user root + strategy: &strategy_ubuntu_debug + fail-fast: false + matrix: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_debug ) }} + if: &if_ubuntu_debug ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_debug ).include[0] }} + defaults: + run: + shell: bash + steps: &build_steps + - *step_show_context + - &step_show_needs + name: Show needs + run: | + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + - + run: test -n "$container" + env: + container: *container_image + - + if: ${{ needs.Docker-iroha-builder.outputs.container_tag != needs.Docker-iroha-builder.outputs.dockertag || + needs.Docker-iroha-builder.outputs.container == '' }} + name: Possible WARNING + env: + container: *container_image + dockertag: ${{needs.Docker-iroha-builder.outputs.dockertag}} + run: | + cat <>$GITHUB_ENV CC=$CC + echo >>$GITHUB_ENV CXX=$(echo $CC | sed -es,gcc,g++, -es,clang,clang++,) + echo >>$GITHUB_PATH $CCACHE_PATH + ls -lA $CCACHE_PATH + $(realpath $CCACHE_PATH/gcc) --show-config + echo >>$GITHUB_ENV _CCACHE_DIR=$($(realpath $CCACHE_PATH/gcc) --show-config | sed -nE 's,.*cache_dir = ,,p') + echo >>$GITHUB_ENV 
NPROC=$(nproc | awk '{printf("%.0f",$1*0.77)}') + - &step_restore_ccache + name: Restore cache ccache + uses: actions/cache@v2 + with: + path: ${{ env._CCACHE_DIR }} + key: ${{ runner.os }}-ccache + - &step_store_ccache_stats + run: ccache --show-stats | tee /tmp/ccache-stats + - &step_vcpkg_cache + if: ${{false}} ## This works bad when something patched or something updated, seems they does not recalc hash of changed packages. See todos in the begining of file. + ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md + name: Restore cache vcpkg + uses: actions/cache@v2 + with: + path: | + ${{ env.HOME }}/.cache/vcpkg/archives + # $HOME/.cache/vcpkg + # build-vcpkg/installed + # build/vcpkg_installed ## This is default folder for manual installation in manifest mode + key: ${{ runner.os }}-${{matrix.CC}}-vcpkg + # key: ${{ runner.os }}-vcpkg-${{matrix.CC}}-${{ hashFiles('build-vcpkg/installed/vcpkg/status') }} + # restore-keys: ${{ runner.os }}-vcpkg-${{matrix.CC}}- + - &step_vcpkg_build + name: Build iroha vcpkg dependancies + run: ./vcpkg/build_iroha_deps.sh $PWD/build-vcpkg; test -f $PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake + ## Takes 48m16s on default GitHub runner with 2 cores + ## Takes 13m41s on self-hosted AWS EC2 c5.x4large + # ________________________________________________________ + # Executed in 32,08 mins fish external + # usr time 110,52 mins 0,24 millis 110,52 mins + # sys time 12,26 mins 1,34 millis 12,26 mins + # + # All requested packages are currently installed. 
+ # ________________________________________________________ + # Executed in 3,17 secs fish external + # usr time 2,05 secs 128,00 micros 2,05 secs + # sys time 0,70 secs 575,00 micros 0,70 secs + - &step_cmake_configure + name: CMake configure + ## Takes 13s on regular GitHub runner + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake + -DCMAKE_BUILD_TYPE=${{ matrix.BuildType }} + -GNinja ${{ matrix.CMAKE_USE }} + -DTESTING=ON + -DPACKAGE_DEB=ON + #-DCMAKE_VERBOSE_MAKEFILE=ON + - &step_cmake_build + name: CMake build + run: | + set -x + ## reduce memory usage to do not overflow + cmake --build build --config ${{ matrix.BuildType }} -- -j$(nproc | awk '{printf("%.0f",$1*0.77)}') + ## Debug takes 18m44s on regular GitHub runner + ## Debug takes 7m41s on self-hosted AWS EC2 c5.x4large + ## Release takes 2m58s on self-hosted AWS EC2 c5.x4large + - &step_cpack + name: CPack (linux only) + run: cd build; cpack; ## cmake --build build --target package + - &step_compare_ccache_stats + run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true + - &step_always_after_build + name: Show free space and disk usage + if: ${{ always() }} + run: | + df -h || true + - &step_artifact_suffix + name: Generate artifact suffix depending on matrix + env: &step_artifact_suffix_env + os: ubuntu ## maybe ${{ run.os }} + CC: ${{ matrix.CC }} + BuildType: ${{ matrix.BuildType }} + CMAKE_USE: ${{ matrix.CMAKE_USE }} + run: | + set -x + cc=$(echo $CC | sed -Ee's,[-/],,g' ) + build_type=$(echo $BuildType | tr A-Z a-z | sed -E -es,debug,dbg, -es,release,rel, ) + test $build_type = dbg -o $build_type = rel + uses=$(echo $CMAKE_USE | + tr ' ' '\n' | sed -nE -e's,.*USE_([a-zA-Z]+)=ON.*,\1,gp ; s, ,-, ;' | xargs | tr ' ' - | tr A-Z a-z ) + _os=${os:+-$os} _cc=${cc:+-$cc} _build_type=${build_type:+-$build_type} _uses=${uses:+-$uses} + echo >>$GITHUB_ENV ARTIFACT_SUFFIX=$_os$_cc$_build_type$_uses + echo >>$GITHUB_ENV _uses_suffix=$_uses + echo 
>>$GITHUB_ENV _compiler_suffix=$(test $cc != gcc9 && echo $_cc) + echo >>$GITHUB_ENV _debug_suffix=$(test "$build_type" = dbg && echo -debug || true) + # echo >>$GITHUB_ENV _os=$_os _cc=$_cc _build_type=$_build_type _uses=$_uses + - &step_artifact_irohad + name: Upload artifact irohad + uses: actions/upload-artifact@v2 + with: + name: irohad${{env.ARTIFACT_SUFFIX}} + path: &step_artifact_irohad_path | + build/bin/irohad + build/bin/iroha-cli + - &step_artifact_iroha_deb + name: Upload artifact iroha-deb + uses: actions/upload-artifact@v2 + with: + name: iroha-deb${{env.ARTIFACT_SUFFIX}} + path: &step_artifact_iroha_deb_path | + build/*.deb + - &step_artifact_tests + if: ${{ false }} ## Maybe test in another job + name: Upload artifact tests + uses: actions/upload-artifact@v2 + with: + name: iroha-tests-ubuntu${{env.ARTIFACT_SUFFIX}} + path: | + build/test_bin/** + build/test_data/** + - &step_ctest + timeout-minutes: 40 + name: CTest + run: | + set -xeuo pipefail + if test $(uname) = Darwin ;then + ## This is a common portable solution, but Debian and Ubuntu have their own wrappers + initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database + postgres -D $PWD/postgres_database -p5432 2>&1 >/tmp/postgres.log & { sleep .3; kill -0 $!; } ## use pg_ctl no need & + else + mkdir postgres_database && chown iroha-ci postgres_database + echo /usr/lib/postgresql/12/bin/initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database | su iroha-ci + echo /usr/lib/postgresql/12/bin/pg_ctl start -D $PWD/postgres_database --log=$PWD/postgres_database/log | su iroha-ci + # ## Need to go debian-specific way because + # ## initdb is not allowed to be run as root, but we need to run as root + # ## because GitHub actions runners have much issues with permissions. 
+ # cat </etc/postgresql/12/main/pg_hba.conf + # # TYPE DATABASE USER ADDRESS METHOD + # local all all trust + # host all all 127.0.0.1/32 trust + # host all all ::1/128 trust + # local replication all trust + # host replication all 127.0.0.1/32 trust + # host replication all ::1/128 trust + # END + # pg_ctlcluster 12 main start ## Cluster 'main' exist by default + # #OR pg_createcluster -p 5432 --start 12 iroha -- --locale=C --encoding=UTF-8 --username=postgres + fi + ## Run module_* tests in parallel and others subsequently + cd build + ## FIXME dissallow to fail, remove '||true' after ctest + cat | sort -u >ALLOW_TO_FAIL <>$GITHUB_PATH $HOME/go/bin + - *step_detect_commented_pr + - *step_checkout + - <<: *step_export_cxx + env: + <<: *step_export_cxx_env + CCACHE_PATH: /usr/local/opt/ccache/libexec + - *step_restore_ccache + - *step_store_ccache_stats + - *step_vcpkg_cache + - *step_vcpkg_build + - *step_cmake_configure + - *step_cmake_build + - *step_compare_ccache_stats + - *step_always_after_build + - <<: *step_artifact_suffix + name: Generate artifact suffix >> env.ARTIFACT_SUFFIX + #run: inherited + env: + <<: *step_artifact_suffix_env + os: macos ##${{run.os}} + - <<: *step_artifact_irohad + with: + name: irohad-macos${{env.ARTIFACT_SUFFIX}} + path: *step_artifact_irohad_path + - *step_artifact_tests + - &step_brew_postgres + name: Install Postgres on MacOS + run: brew install postgresql + ## ToDo may be optimize, i.e. 
cache package + - <<: *step_ctest + timeout-minutes: 70 + + ## Just to align picture + prepare-windows-env: + needs: check_workflow_yaml_coressponds_to_src_yaml + runs-on: windows-latest + steps: + - *step_show_context + defaults: + run: + shell: bash + + build-W: + needs: + - prepare-windows-env + - generate_matrixes + runs-on: windows-latest + strategy: + fail-fast: false + matrix: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_windows ) }} + #if: ${{ false }} ##FIXME Somehow cmake fails to find GTest and others + if: ${{ false && ( fromJSON( needs.generate_matrixes.outputs.matrix_windows ).include[0] ) }} + # matrix: + # BuildYype: [ Debug ] #,Release, RelWithDebInfo + defaults: + run: + shell: bash #pwsh + working-directory: &workdir 'C:\github\iroha' ## Use disk C: because D: is out of space + steps: + - name: Create working-directory, export WORKDIR + run: | + set -x + mkdir -p "$WORKDIR" + echo $PWD + echo >>$GITHUB_ENV WORKDIR="$WORKDIR" + working-directory: 'C:\' + env: { WORKDIR: *workdir } + - name: uname in bash + run: uname + shell: bash + - name: uname in [default] pwsh shell + run: uname + shell: pwsh + - &step_choco_install + name: Chocolatey install + run: choco install cmake ninja #ccache + - *step_checkout + - name: move to workdir + run: | + set -x + echo $PWD + shopt -s dotglob nullglob + mv -vf * -t "$WORKDIR" + working-directory: + #- *step_restore_ccache + #- *step_vcpkg_cache + - *step_vcpkg_build + - *step_cmake_configure + - *step_cmake_build + - *step_always_after_build + - + name: Install Postgres on Windows + run: choco install postgresql + # - *step_ctest + + ## Build and publish docker image named 'hyperledger/iroha' with irohad and iroha tools inside. 
+ ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, + ## and conditional tags :edge (for development branches) and :latest (for git-tags) + ## Those docker image tags could be extended with suffixes with compiler and build type like + ## -gcc10, -clang, -debug, -gcc10-debug. + ## Result image name could look like: hyperledger/iroha:pr-1117, hyperledger/iroha-burrow:commit-XXXXX-debug + ## Note: image is push only when DockerHub login-token pair available - not to PRs from forks + docker-R: &job_docker_image_release + needs: + - build-UR + - generate_matrixes + runs-on: [ self-hosted, Linux ] ## or ubuntu-latest + strategy: *strategy_ubuntu_release + if: *if_ubuntu_release + env: &env_dockerhub_release + <<: *env_dockerhub + IMAGE_NAME: iroha + steps: + - *step_show_context + - *step_system_info + - *step_build_info + - *step_detect_commented_pr + - *step_checkout + - *step_artifact_suffix + - &step_download_artifact_iroha_deb + name: Download artifact + uses: actions/download-artifact@v2 + with: + name: iroha-deb${{env.ARTIFACT_SUFFIX}} + - &rename_artifact_deb + name: Rename artifact debs + run: | + mv *iroha_shepherd.deb docker/release/iroha_shepherd.deb + mv *irohad.deb docker/release/iroha.deb + - &step_dockertag_release + <<: *step_docker_tag + env: + dockertag: ${{ hashFiles('docker/release/**') }} + - <<: *step_docker_meta + with: + <<: *step_docker_meta_with + images: ${{ env.DOCKERHUB_ORG }}/${{ env.IMAGE_NAME }}${{ env._uses_suffix }} ## uses suffics could be empty, -burrow, -ursa + flavor: suffix=${{env._compiler_suffix}}${{env._debug_suffix}} + #maybetodo flavor: prefix=${{ env.USES_PREFIX }} ## In case creating repository hyperledger/iroha-burrow denied, Use tag prefix hyperledger/iroha:burrow-xxxx + - *step_docker_login + - *step_warn_docker_no_push + - *step_docker_buildx + - <<: *step_docker_cache + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-release-${{env.dockertag}} + 
restore-keys: ${{ runner.os }}-buildx-release + - <<: *step_docker_build_and_push + with: + <<: *step_docker_build_and_push_with + context: docker/release/ + push: ${{ steps.docker_login.outcome == 'success' && ( matrix.dockerpush == '' || matrix.dockerpush == 'yes' ) }} + - *step_docker_move_cache + + docker-D: + <<: *job_docker_image_release + needs: + - build-UD + - generate_matrixes + strategy: *strategy_ubuntu_debug + if: *if_ubuntu_debug + # env: + # <<: *env_dockerhub_release + # IMAGE_NAME: iroha-debug diff --git a/.github/chatops-gen-matrix.sh b/.github/chatops-gen-matrix.sh new file mode 100755 index 00000000000..083b3a66fa9 --- /dev/null +++ b/.github/chatops-gen-matrix.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash +set -euo pipefail +shopt -s lastpipe + +echowarn(){ + echo >&2 '::warning::'"$@" +} +echoerr(){ + echo >&2 '::error::'"$@" +} + +readonly ALL_oses="ubuntu macos windows" ALL_build_types="Debug Release" ALL_cmake_opts="normal burrow ursa" ALL_compilers="gcc-9 gcc-10 clang-10 clang llvm msvc" +readonly DEFAULT_oses="ubuntu macos windows" DEFAULT_build_types="Debug" DEFAULT_cmake_opts="normal burrow ursa" +readonly DEFAULT_ubuntu_compilers="gcc-9" AVAILABLE_ubuntu_compilers="gcc-9 gcc-10 clang-10" +readonly DEFAULT_macos_compilers="clang" AVAILABLE_macos_compilers="clang" ## Also "llvm gcc-10" but they fail +readonly DEFAULT_windows_compilers="msvc" AVAILABLE_windows_compilers="msvc" ## Also "clang mingw cygwin" but they are redundant + +--help-buildspec(){ + cat < 0 ]] ;do + case "$1" in + ## ARGUMENTS + help|--help|-h) + --help; exit + ;; + ## END ARGUMENTS + *) + cmdline+="$1 " + ;; + -*) + echoerr "Unknown argument '$1'" + exit 1 + ;; + esac + shift +done + +generate(){ + declare -rn DEFAULT_compilers=DEFAULT_${os}_compilers + declare -rn AVAILABLE_compilers=AVAILABLE_${os}_compilers + local compilers=${compilers:-$DEFAULT_compilers} + local cc bt op used_compilers= + for cc in $compilers ;do + if ! 
[[ " $AVAILABLE_compilers " = *" $cc "* ]] ;then + continue + fi + used_compilers+=$cc' ' + for bt in $build_types ;do + for co in $cmake_opts ;do + MATRIX+="$os $cc $bt $co"$'\n' + done + done + done + if test "$used_compilers" = ''; then + echowarn "No available compilers for '$os' among '$compilers', available: '$AVAILABLE_compilers'" + fi +} + +handle_user_line(){ + if [[ "$@" = '' ]] ;then + return + fi + if [[ "${1:-}" != '/build' ]] ;then + echowarn "Line skipped, should start with '/build'" + return + fi + shift + local oses compilers cmake_opts build_types + dockerpush=yes + + while [[ $# > 0 ]] ;do + case "$1" in + ## BUILDSPEC ARGUMENTS + ubuntu|linux) oses+=" ubuntu " ;; + macos) oses+=" $1 " ;; + windows) oses+=" $1 " ;; + normal) cmake_opts+=" $1 " ;; + burrow) cmake_opts+=" $1 " ;; + ursa) cmake_opts+=" $1 " ;; + release|Release) build_types+=" Release " ;; + debug|Debug) build_types+=" Debug" ;; + gcc|gcc-9|gcc9) compilers+=" gcc-9 " ;; + gcc-10|gcc10) compilers+=" gcc-10 " ;; + clang|clang-10|clang10) compilers+=" clang clang-10" ;; + llvm) compilers+=" $1 " ;; + clang) compilers+=" $1 " ;; + msvc) compilers+=" $1 " ;; + mingw) compilers+=" $1 " ;; + cygwin) compilers+=" $1 " ;; + dockerpush) dockerpush=yes ;; + nodockerpush) dockerpush=no ;; + all|everything|beforemerge|before_merge|before-merge|readytomerge|ready-to-merge|ready_to_merge) + oses="$ALL_oses" build_types="$ALL_build_types" cmake_opts="$ALL_cmake_opts" compilers="$ALL_compilers" + ;; + ## END BUILDSPEC ARGUMENTS + *) + echoerr "Unknown /build argument '$1'" + return 1 + ;; + esac + shift + done + + oses=${oses:-$DEFAULT_oses} + build_types=${build_types:-$DEFAULT_build_types} + cmake_opts=${cmake_opts:-$DEFAULT_cmake_opts} + + for os in $oses ;do + generate + done +} + +if test -z "$cmdline" ;then + while read input_line ;do + handle_user_line $input_line || continue + done +else + handle_user_line $cmdline || true +fi + +test -n "${MATRIX:-}" || + { echoerr "MATRIX is empty!"; 
--help-buildspec >&2; exit 1; } + + +############# FIXME remove this after build fixed ############# +echo "$MATRIX" | awk -v IGNORECASE=1 '!/gcc-9/ && /release/' | while read line ;do echo "'$line'" ;done | + echowarn "FIXME At the moment we are able to build Release only with GCC-9, other buildspecs are dropped: "$(cat) +MATRIX="$(echo "$MATRIX" | awk -v IGNORECASE=1 '!(!/gcc-9/ && /release/)' )" ##FIXME lifehack to disable always failing build during linkage +############# END fixme remove this after build fixed ######### + + +to_json(){ + echo "{ + os:\"$1\", + cc:\"$2\", + BuildType:\"$3\", + CMAKE_USE:\"$( [[ "$4" = normal ]] || echo "-DUSE_${4^^}=ON" )\", + dockerpush: \"$dockerpush\" + }" +} +to_json_multiline(){ + echo [ + comma='' + while read line ;do + # if [[ "" = "$line" ]] ;then continue ;fi + echo "$comma$(to_json $line)" + comma=, + done + echo ] +} +json_include(){ + jq -cn ".include=$(to_json_multiline)" +} + +MATRIX="$(echo "$MATRIX" | sed '/^$/d' | sort -uV)" +echo "$MATRIX" +echo "$MATRIX" | json_include >matrix +echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/' | json_include >matrix_ubuntu +echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /release/' | json_include >matrix_ubuntu_release +echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /debug/' | json_include >matrix_ubuntu_debug +echo "$MATRIX" | awk -v IGNORECASE=1 '/macos/' | json_include >matrix_macos +echo "$MATRIX" | awk -v IGNORECASE=1 '/windows/' | json_include >matrix_windows diff --git a/.github/issue_template.md b/.github/issue_template.md index 5b84ff5fa0a..25b1f3ce759 100644 --- a/.github/issue_template.md +++ b/.github/issue_template.md @@ -1,7 +1,5 @@ ## Hello! It is great that you've decided to contribute to Hyperledger Iroha by telling us more about the issue you've encountered! -The thing is that we now create and store issues only in [Hyperledger JIRA](https://jira.hyperledger.org/projects/IR) so we would kindly ask you to create your issue there. 
-That way you will be able to track it in JIRA with your [LFID](https://www.youtube.com/watch?v=EEc4JRyaAoA) account. -Still, if it is more convenient for you to create an issue on GitHub, our Yozhik Bot will carefully transfer it to JIRA and close the issue here. +To report the issue here, please try to **explain** it as thoroughly as possible and **include Iroha version and logs**, if you have any, as well as your **environment information**: OS, specifications etc. \ No newline at end of file diff --git a/.github/make-workflows.sh b/.github/make-workflows.sh new file mode 100755 index 00000000000..a44d4126536 --- /dev/null +++ b/.github/make-workflows.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash +set -euo pipefail + + +--help(){ + cat<<'END' +make-workflows: + This script expands '*.src.yml' from $1 (default: script's directory) + to $2 (default:REPO_ROOT/.github/workflows) with corresponding name '*.yml' + Main goal is to dereference YAML anchors. + Deals only with Git cached/indexed files until --no-git-index passed.
+ DEBUG: use option -x + NOTE: spaces in filenames are not allowed to keep code simplicity +END + cat< 0 ]] ;do + case "$1" in + ## List files and get contents from working tree instead of git index + --no-git-index|--worktree) + files_list(){ + ls $@ + } + file_contents(){ + cat $@ + } + ;; + -x|--trace) set -x ;; + +x|--no-trace) set +x ;; + -h|--help|'-?') --help ;; + -*) + echo >&2 "make-workflows: ERROR: unxpected parameter" + --help >&2 + exit 2 + ;; + ## The last non-option argument is dir_to all previous are dirs_from + *) + if [[ "$1" = *' '* ]] ;then + echo >&2 "make-workflows: ERROR: spaces in arguments are not allowed: '$1'" + exit 1 + fi + if [[ "$(echo ${dirs_from:-})" = '' ]] ;then + dirs_from=$1 + else + dirs_from+=" "${dir_to:-} + dir_to=$1 + fi + ;; + esac + shift +done + +readonly script_dir=$(dirname $(realpath "$0")) +readonly dirs_from=${dirs_from:-${script_dir}} +readonly repo_root=$(git rev-parse --show-toplevel) + dir_to=${dir_to:-$repo_root/.github/workflows} +readonly dir_to=$(realpath $dir_to) +edited_files= + +for dir_from in $dirs_from ;do + pushd $dir_from >/dev/null + for f in $(files_list '*.src.yml') ;do + out=$(echo $f | sed 's|.src.yml$|.yml|') + wout=$dir_to/$out + tempout=$(mktemp) + trap "rm -f $tempout" EXIT ## in case of error file will be removed before exit + echo >>$tempout "## DO NOT EDIT" + echo >>$tempout "## Generated from $f with $(basename $0)" + echo >>$tempout "" + ## Take cached content from index + file_contents ./$f | yq eval 'explode(.)' - >>$tempout + if ! 
diff -q $wout $tempout &>/dev/null ;then + mv $tempout $wout + edited_files+="'$(realpath --relative-to=$OLDPWD $wout)' " + else + rm -f $tempout + fi + done + popd >/dev/null +done + +if [[ -n "$edited_files" ]] +then echo "make-workflows: these files were edited: $edited_files" +else echo "make-workflows: everything is up to date" +fi diff --git a/.github/pre-commit-hook.sh b/.github/pre-commit-hook.sh new file mode 100755 index 00000000000..62c8fdea887 --- /dev/null +++ b/.github/pre-commit-hook.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -euo pipefail +cd $(git rev-parse --show-toplevel) +./.github/make-workflows.sh +git add .github/workflows diff --git a/.github/workflows/build-iroha1.yml b/.github/workflows/build-iroha1.yml new file mode 100644 index 00000000000..1b109622ed7 --- /dev/null +++ b/.github/workflows/build-iroha1.yml @@ -0,0 +1,1592 @@ +## DO NOT EDIT +## Generated from build-iroha1.src.yml with make-workflows.sh + +name: Iroha1 +## TODO 1. [vcpkg] build only Debug or only Release - reduce vcpkg build duration and output size 2times +## see directory triplets/, `vcpkg help triplets` and link: https://stackoverflow.com/a/52781832/3743145 +## TODO 2. [vcpkg] Do not rebuild vcpkg-tool every time [takes about 1min], see build_iroha_deps.sh +## TODO 3. [vcpkg] Use binarycaching on CI https://devblogs.microsoft.com/cppblog/vcpkg-accelerate-your-team-development-environment-with-binary-caching-and-manifests/ +## TODO 3. [speed] better caching utilizing diff-backup style tools like restic and rclone +## to improve performance and reduce storage consumption. +## Store ccache with rclone, maybe Store vcpkg binarycache with rclone +## The problem/pitfail is to get access token during build from fork +## TODO 4. 
[speed] Self-hosted MacOS and Windows +## Need more powerful Mac machine to reduce build time from 40min to 10min with hot vcpkg cache, +## and from 2hrs to 27min without cache +## GitHub's default runners also idles much time before started when build often +## TODO 5. [speed,optimization,resources] Cancel previous runs if their duration is less than 10 minutes, protect almost done builds from being killed +## TODO [prettify,documentation] update status badges in README.md +## TODO windows +## FIXME checkout issue, see https://github.com/actions/runner/issues/434 +## TODO actions/create-release for main branch and tags + +## CHEAT SHEET +## check if PR head repo is fork: ${{ github.event.pull_request.head.repo.fork }} +## check if PR is from other repo: ${{ github.event.pull_request.head.repo == github.event.pull_request.base.repo }} in this case secrets are empty +## ternary: ${{ fromJSON('["no", "yes"]')[github.ref != 'refs/heads/master'] }} + +## TODO make these different workflows - reduce number of conditionals inside jobs like 'step_detect_commented_pr' +on: + push: + branches: [main, support/1.*, edge, develop, test-ci, gha, gha/*, gha-*, \*-with-gha] + tags: ['v*'] + pull_request: + branches: [main, support/1.*, edge, develop] ## target branches + workflow_dispatch: + ## NOTE: Able to run via cmdline: gh workflow run Iroha1 + inputs: + build_spec: + description: 'See chatops-gen-matrix.sh, example "/build ubuntu macos gcc-9 burrow"' + required: false + default: '/build' + issue_comment: + types: [created, edited] + schedule: + - cron: '12 22 * * *' +jobs: + ## GitHub Actions Workflow does not support yaml anchors + ## and that is why there is a workaround with make-workflows.sh + ## You should `pre-commit install` or use `pre-commit-hook.sh`, + ## anyway please read .github/README.md + check_workflow_yaml_coressponds_to_src_yaml: + runs-on: ubuntu-latest + name: Check if github workflows were properly made from sources + steps: + - name: REF and 
SHA of commented PR to ENV + if: github.event.comment + run: > + curl -fsSL ${{github.event.issue.pull_request.url}} -H "Authorization: token ${{github.token}}" | jq -r ' + + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + "PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - run: sudo snap install yq + - name: Check if .github/workflows/*.yml correspond to *.src.yml + run: | + set -x + [[ $(./.github/make-workflows.sh -x --worktree) = *"everything is up to date" ]] + pr_comment_reaction_rocket: + ## Just to react to valid comment with rocket + runs-on: ubuntu-latest + if: ${{ github.event.comment && github.event.issue.pull_request && startsWith(github.event.comment.body, '/build') }} + steps: + - name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: Reaction + run: | + # send reaction to comment to show build was triggered + curl ${{github.event.comment.url}}/reactions \ + -X POST \ + -d '{"content":"rocket"}' \ + -H "Accept: application/vnd.github.squirrel-girl-preview+json" \ + -H "Authorization: token ${{github.token}}" + ## This job is to generate build matrixes for build jobs + ## The matrixes depend on what is requeted to be build + ## At the moment there are several options: + ## - default on pushes, pull requests + ## - on comment to pull request according to comment message (chat-ops) + ## - TODO on workflow_dispatch according to its build_spec + ## - TODO all on schedule + generate_matrixes: + runs-on: ubuntu-latest + if: ${{ (github.event_name != 'comment') || ( 
github.event.comment && github.event.issue.pull_request && startsWith(github.event.comment.body, '/build') ) }} + # needs: check_workflow_yaml_coressponds_to_src_yaml + steps: + - name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - name: Generate matrix for build triggered by chat-ops - comment to PR + if: github.event.comment && github.event.issue.pull_request + id: comment_body + run: echo "${{github.event.comment.body}}" >/tmp/comment_body + - name: Generate default matrix for regular builds + if: ${{ steps.comment_body.outcome == 'skipped' }} + run: | + set -x + commit_message_body_build_spec(){ + git fetch origin ${{github.event.after}} --depth=1 + git log --format=%B -n1 ${{github.event.after}} | grep '^/build ' + } + case ${{github.event_name}} in + pull_request) commit_message_body_build_spec >/tmp/comment_body || + echo >/tmp/comment_body "/build debug" ;; + push) commit_message_body_build_spec >/tmp/comment_body || { + echo "/build ubuntu debug release" + echo "/build macos debug" + echo "/build windows debug" + } >/tmp/comment_body ;; + schedule) echo >/tmp/comment_body "/build all" ;; + workflow_dispatch) echo >/tmp/comment_body "${{github.event.inputs.build_spec}}" ;; + *) echo >&2 "::error::Unexpected event"; false ;; + esac + - name: Generate matrixes + id: matrixes + run: | + set -x + cat /tmp/comment_body | .github/chatops-gen-matrix.sh + echo "::set-output name=matrix_ubuntu::$(cat matrix_ubuntu)" + echo "::set-output name=matrix_ubuntu_release::$(cat matrix_ubuntu_release)" + echo "::set-output 
name=matrix_ubuntu_debug::$(cat matrix_ubuntu_debug)" + echo "::set-output name=matrix_macos::$(cat matrix_macos)" + echo "::set-output name=matrix_windows::$(cat matrix_windows)" + ##TODO report errors and warnings as answer as issue comment (chat-ops) + - name: Reaction confused + if: failure() && github.event.comment + run: | + # send reaction to comment to show build was triggered + curl ${{github.event.comment.url}}/reactions \ + -X POST \ + -d '{"content":"confused"}' \ + -H "Accept: application/vnd.github.squirrel-girl-preview+json" \ + -H "Authorization: token ${{github.token}}" + - name: Reaction rocket + if: github.event.comment + run: | + # send reaction to comment to show build was triggered + curl ${{github.event.comment.url}}/reactions \ + -X POST \ + -d '{"content":"rocket"}' \ + -H "Accept: application/vnd.github.squirrel-girl-preview+json" \ + -H "Authorization: token ${{github.token}}" + outputs: + matrix_ubuntu: ${{steps.matrixes.outputs.matrix_ubuntu}} + matrix_ubuntu_release: ${{steps.matrixes.outputs.matrix_ubuntu_release}} + matrix_ubuntu_debug: ${{steps.matrixes.outputs.matrix_ubuntu_debug}} + matrix_macos: ${{steps.matrixes.outputs.matrix_macos}} + matrix_windows: ${{steps.matrixes.outputs.matrix_windows}} + ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies + ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, + ## and conditional tags :edge (for development branches) and :latest (for git-tags) + ## Note: image is push only when DockerHub login-token pair available - not to PRs from forks + Docker-iroha-builder: + needs: check_workflow_yaml_coressponds_to_src_yaml + runs-on: ubuntu-latest #[ self-hosted, Linux ] + env: + DOCKERHUB_ORG: hyperledger ## Must be hyperledger, also can use iroha1, cannot use ${{ secrets.DOCKERHUB_ORG }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + 
steps: + - name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: System info + run: | + set -x + whoami + id $(whoami) + free || vm_stat | perl -ne '/page size of (\d+)/ and $size=$1; + /Pages\s+([^:]+)[^\d]+(\d+)/ and printf("%-16s % 16.2f Mi\n", "$1:", $2 * $size / 1048576);' + df -h + - name: Build info + run: | + cat << 'END' + ref:${{github.ref}} + sha:${{github.sha}} + run_number:${{github.run_number}} + event_name:${{github.event_name}} + event.action:${{github.event.action}} + event.issue.number:${{ github.event.issue.number }} + END + - name: REF and SHA of commented PR to ENV + if: github.event.comment + run: > + curl -fsSL ${{github.event.issue.pull_request.url}} -H "Authorization: token ${{github.token}}" | jq -r ' + + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + "PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - name: Determine dockertag + id: dockertag + env: + dockertag: ${{ hashFiles('docker/iroha-builder/**') }} + run: | + echo "::set-output name=dockertag::$dockertag" + echo >>$GITHUB_ENV dockertag=$dockertag + test -n "$DOCKERHUB_ORG" || { + echo ::error::"DOCKERHUB_ORG must contain value" + false + } + - name: Login to DockerHub + if: ${{ env.DOCKERHUB_TOKEN != '' && env.DOCKERHUB_USERNAME != '' }} + id: docker_login + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Possible WARNING + if: ${{ steps.docker_login.outcome == 'skipped' }} + run: echo "::warning::DOCKERHUB_TOKEN and 
DOCKERHUB_USERNAME are empty. Will build but NOT push." + - name: Docker meta + id: meta + uses: docker/metadata-action@v3 + with: + images: ${{ env.DOCKERHUB_ORG }}/iroha-builder + tags: | + type=raw,value=${{env.dockertag}} + type=ref,event=branch + type=ref,event=pr + type=ref,event=tag + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=schedule + type=edge,branch=support/1.2.x + type=edge,branch=develop + type=edge,branch=test-ci + type=sha,prefix=commit-,format=short + type=sha,prefix=commit-,format=long + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed + ## - tag 'edge' when branch support/1.2.x is pushed + ## - schedule, see the docs + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{env.dockertag}} + restore-keys: ${{ runner.os }}-buildx- + - id: build_and_push + name: Build and push + uses: docker/build-push-action@v2 + with: + context: docker/iroha-builder/ + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new + push: ${{ steps.docker_login.outcome == 'success' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + - # Temp fix + # https://github.com/docker/build-push-action/issues/252 + # https://github.com/moby/buildkit/issues/1896 + name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + - name: Check if dockertaghash exists in remote registry + id: dockertag_already + run: | + exists=$( curl -fL https://hub.docker.com/v2/repositories/$DOCKERHUB_ORG/iroha-builder/tags | + jq 'any( .results[]|.name == "${{env.dockertag}}" ; .)' ) + echo 
"::set-output name=exists::$exists" + + if test $exists = true ;then + tag=$dockertag + else + tag=edge + fi + container="$DOCKERHUB_ORG/iroha-builder:$tag" + echo "::set-output name=container::$container" + echo "::set-output name=container_tag::$tag" + outputs: + ## WARN secret dropped from output!, output may not contain secret, + ## and secret cannot be used in job:container directly, and there is no github non-secret variables... + ## if dockertag is already pushed then use it. But let it be empty when tag does not exist remotely. + dockertag: ${{steps.dockertag.outputs.dockertag}} + _dockertag: :${{steps.dockertag.outputs.dockertag}} + pushed: ${{ steps.docker_login.outcome == 'success' && steps.build_and_push.outcome == 'success' }} + dockertag_already_exists: ${{steps.dockertag_already.outputs.exists}} + container: ${{steps.dockertag_already.outputs.container}} + container_tag: ${{steps.dockertag_already.outputs.container_tag}} + ## Build iroha in a container made of the image earlier prepared + ## Result artifacts are + ## - stand-alone irohad (linked statically) + ## - iroha.deb (with irohad inside) + build-UD: + needs: + - Docker-iroha-builder + - generate_matrixes + runs-on: [self-hosted, Linux] + ## Container is taken from previous job + container: #&container + image: ${{needs.Docker-iroha-builder.outputs.container}} + options: --user root + strategy: + fail-fast: false + matrix: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_debug ) }} + if: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_debug ).include[0] }} + defaults: + run: + shell: bash + steps: + - name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: Show needs + run: | + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + - run: test -n "$container" + env: + 
container: ${{needs.Docker-iroha-builder.outputs.container}} + - if: ${{ needs.Docker-iroha-builder.outputs.container_tag != needs.Docker-iroha-builder.outputs.dockertag || needs.Docker-iroha-builder.outputs.container == '' }} + name: Possible WARNING + env: + container: ${{needs.Docker-iroha-builder.outputs.container}} + dockertag: ${{needs.Docker-iroha-builder.outputs.dockertag}} + run: | + cat < + curl -fsSL ${{github.event.issue.pull_request.url}} -H "Authorization: token ${{github.token}}" | jq -r ' + + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + "PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - name: export CC and CXX + env: + CC: ${{matrix.CC}} + CCACHE_PATH: /usr/lib/ccache + run: | + set -xeu #o pipefail + if test $CC = llvm ;then CC=/usr/local/opt/llvm/bin/clang ;fi + echo >>$GITHUB_ENV CC=$CC + echo >>$GITHUB_ENV CXX=$(echo $CC | sed -es,gcc,g++, -es,clang,clang++,) + echo >>$GITHUB_PATH $CCACHE_PATH + ls -lA $CCACHE_PATH + $(realpath $CCACHE_PATH/gcc) --show-config + echo >>$GITHUB_ENV _CCACHE_DIR=$($(realpath $CCACHE_PATH/gcc) --show-config | sed -nE 's,.*cache_dir = ,,p') + echo >>$GITHUB_ENV NPROC=$(nproc | awk '{printf("%.0f",$1*0.77)}') + - name: Restore cache ccache + uses: actions/cache@v2 + with: + path: ${{ env._CCACHE_DIR }} + key: ${{ runner.os }}-ccache + - run: ccache --show-stats | tee /tmp/ccache-stats + - if: ${{false}} ## This works bad when something patched or something updated, seems they does not recalc hash of changed packages. See todos in the begining of file. 
+ ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md + name: Restore cache vcpkg + uses: actions/cache@v2 + with: + path: | + ${{ env.HOME }}/.cache/vcpkg/archives + # $HOME/.cache/vcpkg + # build-vcpkg/installed + # build/vcpkg_installed ## This is default folder for manual installation in manifest mode + key: ${{ runner.os }}-${{matrix.CC}}-vcpkg + # key: ${{ runner.os }}-vcpkg-${{matrix.CC}}-${{ hashFiles('build-vcpkg/installed/vcpkg/status') }} + # restore-keys: ${{ runner.os }}-vcpkg-${{matrix.CC}}- + - name: Build iroha vcpkg dependancies + run: ./vcpkg/build_iroha_deps.sh $PWD/build-vcpkg; test -f $PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake + ## Takes 48m16s on default GitHub runner with 2 cores + ## Takes 13m41s on self-hosted AWS EC2 c5.x4large + # ________________________________________________________ + # Executed in 32,08 mins fish external + # usr time 110,52 mins 0,24 millis 110,52 mins + # sys time 12,26 mins 1,34 millis 12,26 mins + # + # All requested packages are currently installed. 
+ # ________________________________________________________ + # Executed in 3,17 secs fish external + # usr time 2,05 secs 128,00 micros 2,05 secs + # sys time 0,70 secs 575,00 micros 0,70 secs + - name: CMake configure + ## Takes 13s on regular GitHub runner + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ matrix.BuildType }} -GNinja ${{ matrix.CMAKE_USE }} -DTESTING=ON -DPACKAGE_DEB=ON + #-DCMAKE_VERBOSE_MAKEFILE=ON + - name: CMake build + run: | + set -x + ## reduce memory usage to do not overflow + cmake --build build --config ${{ matrix.BuildType }} -- -j$(nproc | awk '{printf("%.0f",$1*0.77)}') + ## Debug takes 18m44s on regular GitHub runner + ## Debug takes 7m41s on self-hosted AWS EC2 c5.x4large + ## Release takes 2m58s on self-hosted AWS EC2 c5.x4large + - name: CPack (linux only) + run: cd build; cpack; ## cmake --build build --target package + - run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true + - name: Show free space and disk usage + if: ${{ always() }} + run: | + df -h || true + - name: Generate artifact suffix depending on matrix + env: + os: ubuntu ## maybe ${{ run.os }} + CC: ${{ matrix.CC }} + BuildType: ${{ matrix.BuildType }} + CMAKE_USE: ${{ matrix.CMAKE_USE }} + run: | + set -x + cc=$(echo $CC | sed -Ee's,[-/],,g' ) + build_type=$(echo $BuildType | tr A-Z a-z | sed -E -es,debug,dbg, -es,release,rel, ) + test $build_type = dbg -o $build_type = rel + uses=$(echo $CMAKE_USE | + tr ' ' '\n' | sed -nE -e's,.*USE_([a-zA-Z]+)=ON.*,\1,gp ; s, ,-, ;' | xargs | tr ' ' - | tr A-Z a-z ) + _os=${os:+-$os} _cc=${cc:+-$cc} _build_type=${build_type:+-$build_type} _uses=${uses:+-$uses} + echo >>$GITHUB_ENV ARTIFACT_SUFFIX=$_os$_cc$_build_type$_uses + echo >>$GITHUB_ENV _uses_suffix=$_uses + echo >>$GITHUB_ENV _compiler_suffix=$(test $cc != gcc9 && echo $_cc) + echo >>$GITHUB_ENV _debug_suffix=$(test "$build_type" = dbg && echo -debug || true) + # echo >>$GITHUB_ENV 
_os=$_os _cc=$_cc _build_type=$_build_type _uses=$_uses + - name: Upload artifact irohad + uses: actions/upload-artifact@v2 + with: + name: irohad${{env.ARTIFACT_SUFFIX}} + path: | + build/bin/irohad + build/bin/iroha-cli + - name: Upload artifact iroha-deb + uses: actions/upload-artifact@v2 + with: + name: iroha-deb${{env.ARTIFACT_SUFFIX}} + path: | + build/*.deb + - if: ${{ false }} ## Maybe test in another job + name: Upload artifact tests + uses: actions/upload-artifact@v2 + with: + name: iroha-tests-ubuntu${{env.ARTIFACT_SUFFIX}} + path: | + build/test_bin/** + build/test_data/** + - timeout-minutes: 40 + name: CTest + run: | + set -xeuo pipefail + if test $(uname) = Darwin ;then + ## This is a common portable solution, but Debian and Ubuntu have their own wrappers + initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database + postgres -D $PWD/postgres_database -p5432 2>&1 >/tmp/postgres.log & { sleep .3; kill -0 $!; } ## use pg_ctl no need & + else + mkdir postgres_database && chown iroha-ci postgres_database + echo /usr/lib/postgresql/12/bin/initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database | su iroha-ci + echo /usr/lib/postgresql/12/bin/pg_ctl start -D $PWD/postgres_database --log=$PWD/postgres_database/log | su iroha-ci + # ## Need to go debian-specific way because + # ## initdb is not allowed to be run as root, but we need to run as root + # ## because GitHub actions runners have much issues with permissions. 
+ # cat </etc/postgresql/12/main/pg_hba.conf + # # TYPE DATABASE USER ADDRESS METHOD + # local all all trust + # host all all 127.0.0.1/32 trust + # host all all ::1/128 trust + # local replication all trust + # host replication all 127.0.0.1/32 trust + # host replication all ::1/128 trust + # END + # pg_ctlcluster 12 main start ## Cluster 'main' exist by default + # #OR pg_createcluster -p 5432 --start 12 iroha -- --locale=C --encoding=UTF-8 --username=postgres + fi + ## Run module_* tests in parallel and others subsequently + cd build + ## FIXME dissallow to fail, remove '||true' after ctest + cat | sort -u >ALLOW_TO_FAIL </dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: Show needs + run: | + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + - run: test -n "$container" + env: + container: ${{needs.Docker-iroha-builder.outputs.container}} + - if: ${{ needs.Docker-iroha-builder.outputs.container_tag != needs.Docker-iroha-builder.outputs.dockertag || needs.Docker-iroha-builder.outputs.container == '' }} + name: Possible WARNING + env: + container: ${{needs.Docker-iroha-builder.outputs.container}} + dockertag: ${{needs.Docker-iroha-builder.outputs.dockertag}} + run: | + cat < + curl -fsSL ${{github.event.issue.pull_request.url}} -H "Authorization: token ${{github.token}}" | jq -r ' + + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + "PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - name: export CC and CXX + env: + CC: ${{matrix.CC}} + CCACHE_PATH: /usr/lib/ccache + run: | + set -xeu #o pipefail + if test $CC = llvm ;then 
CC=/usr/local/opt/llvm/bin/clang ;fi + echo >>$GITHUB_ENV CC=$CC + echo >>$GITHUB_ENV CXX=$(echo $CC | sed -es,gcc,g++, -es,clang,clang++,) + echo >>$GITHUB_PATH $CCACHE_PATH + ls -lA $CCACHE_PATH + $(realpath $CCACHE_PATH/gcc) --show-config + echo >>$GITHUB_ENV _CCACHE_DIR=$($(realpath $CCACHE_PATH/gcc) --show-config | sed -nE 's,.*cache_dir = ,,p') + echo >>$GITHUB_ENV NPROC=$(nproc | awk '{printf("%.0f",$1*0.77)}') + - name: Restore cache ccache + uses: actions/cache@v2 + with: + path: ${{ env._CCACHE_DIR }} + key: ${{ runner.os }}-ccache + - run: ccache --show-stats | tee /tmp/ccache-stats + - if: ${{false}} ## This works bad when something patched or something updated, seems they does not recalc hash of changed packages. See todos in the begining of file. + ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md + name: Restore cache vcpkg + uses: actions/cache@v2 + with: + path: | + ${{ env.HOME }}/.cache/vcpkg/archives + # $HOME/.cache/vcpkg + # build-vcpkg/installed + # build/vcpkg_installed ## This is default folder for manual installation in manifest mode + key: ${{ runner.os }}-${{matrix.CC}}-vcpkg + # key: ${{ runner.os }}-vcpkg-${{matrix.CC}}-${{ hashFiles('build-vcpkg/installed/vcpkg/status') }} + # restore-keys: ${{ runner.os }}-vcpkg-${{matrix.CC}}- + - name: Build iroha vcpkg dependancies + run: ./vcpkg/build_iroha_deps.sh $PWD/build-vcpkg; test -f $PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake + ## Takes 48m16s on default GitHub runner with 2 cores + ## Takes 13m41s on self-hosted AWS EC2 c5.x4large + # ________________________________________________________ + # Executed in 32,08 mins fish external + # usr time 110,52 mins 0,24 millis 110,52 mins + # sys time 12,26 mins 1,34 millis 12,26 mins + # + # All requested packages are currently installed. 
+ # ________________________________________________________ + # Executed in 3,17 secs fish external + # usr time 2,05 secs 128,00 micros 2,05 secs + # sys time 0,70 secs 575,00 micros 0,70 secs + - name: CMake configure + ## Takes 13s on regular GitHub runner + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ matrix.BuildType }} -GNinja ${{ matrix.CMAKE_USE }} -DTESTING=ON -DPACKAGE_DEB=ON + #-DCMAKE_VERBOSE_MAKEFILE=ON + - name: CMake build + run: | + set -x + ## reduce memory usage to do not overflow + cmake --build build --config ${{ matrix.BuildType }} -- -j$(nproc | awk '{printf("%.0f",$1*0.77)}') + ## Debug takes 18m44s on regular GitHub runner + ## Debug takes 7m41s on self-hosted AWS EC2 c5.x4large + ## Release takes 2m58s on self-hosted AWS EC2 c5.x4large + - name: CPack (linux only) + run: cd build; cpack; ## cmake --build build --target package + - run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true + - name: Show free space and disk usage + if: ${{ always() }} + run: | + df -h || true + - name: Generate artifact suffix depending on matrix + env: + os: ubuntu ## maybe ${{ run.os }} + CC: ${{ matrix.CC }} + BuildType: ${{ matrix.BuildType }} + CMAKE_USE: ${{ matrix.CMAKE_USE }} + run: | + set -x + cc=$(echo $CC | sed -Ee's,[-/],,g' ) + build_type=$(echo $BuildType | tr A-Z a-z | sed -E -es,debug,dbg, -es,release,rel, ) + test $build_type = dbg -o $build_type = rel + uses=$(echo $CMAKE_USE | + tr ' ' '\n' | sed -nE -e's,.*USE_([a-zA-Z]+)=ON.*,\1,gp ; s, ,-, ;' | xargs | tr ' ' - | tr A-Z a-z ) + _os=${os:+-$os} _cc=${cc:+-$cc} _build_type=${build_type:+-$build_type} _uses=${uses:+-$uses} + echo >>$GITHUB_ENV ARTIFACT_SUFFIX=$_os$_cc$_build_type$_uses + echo >>$GITHUB_ENV _uses_suffix=$_uses + echo >>$GITHUB_ENV _compiler_suffix=$(test $cc != gcc9 && echo $_cc) + echo >>$GITHUB_ENV _debug_suffix=$(test "$build_type" = dbg && echo -debug || true) + # echo >>$GITHUB_ENV 
_os=$_os _cc=$_cc _build_type=$_build_type _uses=$_uses + - name: Upload artifact irohad + uses: actions/upload-artifact@v2 + with: + name: irohad${{env.ARTIFACT_SUFFIX}} + path: | + build/bin/irohad + build/bin/iroha-cli + - name: Upload artifact iroha-deb + uses: actions/upload-artifact@v2 + with: + name: iroha-deb${{env.ARTIFACT_SUFFIX}} + path: | + build/*.deb + - if: ${{ false }} ## Maybe test in another job + name: Upload artifact tests + uses: actions/upload-artifact@v2 + with: + name: iroha-tests-ubuntu${{env.ARTIFACT_SUFFIX}} + path: | + build/test_bin/** + build/test_data/** + - timeout-minutes: 40 + name: CTest + run: | + set -xeuo pipefail + if test $(uname) = Darwin ;then + ## This is a common portable solution, but Debian and Ubuntu have their own wrappers + initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database + postgres -D $PWD/postgres_database -p5432 2>&1 >/tmp/postgres.log & { sleep .3; kill -0 $!; } ## use pg_ctl no need & + else + mkdir postgres_database && chown iroha-ci postgres_database + echo /usr/lib/postgresql/12/bin/initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database | su iroha-ci + echo /usr/lib/postgresql/12/bin/pg_ctl start -D $PWD/postgres_database --log=$PWD/postgres_database/log | su iroha-ci + # ## Need to go debian-specific way because + # ## initdb is not allowed to be run as root, but we need to run as root + # ## because GitHub actions runners have much issues with permissions. 
+ # cat </etc/postgresql/12/main/pg_hba.conf + # # TYPE DATABASE USER ADDRESS METHOD + # local all all trust + # host all all 127.0.0.1/32 trust + # host all all ::1/128 trust + # local replication all trust + # host replication all 127.0.0.1/32 trust + # host replication all ::1/128 trust + # END + # pg_ctlcluster 12 main start ## Cluster 'main' exist by default + # #OR pg_createcluster -p 5432 --start 12 iroha -- --locale=C --encoding=UTF-8 --username=postgres + fi + ## Run module_* tests in parallel and others subsequently + cd build + ## FIXME dissallow to fail, remove '||true' after ctest + cat | sort -u >ALLOW_TO_FAIL </dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + build-M: + needs: + - prepare-macos-env + - generate_matrixes + runs-on: macos-latest #[ self-hosted, MacOS ] # + strategy: + fail-fast: false + matrix: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_macos ) }} + if: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_macos ).include[0] }} + steps: + - name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: System info + run: | + set -x + whoami + id $(whoami) + free || vm_stat | perl -ne '/page size of (\d+)/ and $size=$1; + /Pages\s+([^:]+)[^\d]+(\d+)/ and printf("%-16s % 16.2f Mi\n", "$1:", $2 * $size / 1048576);' + df -h + - name: Build info + run: | + cat << 'END' + ref:${{github.ref}} + sha:${{github.sha}} + run_number:${{github.run_number}} + event_name:${{github.event_name}} + event.action:${{github.event.action}} + event.issue.number:${{ github.event.issue.number }} + END + - name: Homebrew + run: brew install cmake ninja coreutils ccache + ## Takes 22 seconds with default github runner + - if: ${{ 
contains(matrix.CC, 'gcc-10') }} + name: Homebrew GCC + run: brew install gcc@10 + - if: ${{ contains(matrix.CC, 'llvm') }} + name: Homebrew LLVM + run: brew install llvm + - if: ${{ contains(matrix.CMAKE_USE, '-DUSE_BURROW=ON') }} + name: Install protoc-gen-go for -DUSE_BURROW=ON + run: | + go get github.com/golang/protobuf/protoc-gen-go + echo >>$GITHUB_PATH $HOME/go/bin + - name: REF and SHA of commented PR to ENV + if: github.event.comment + run: > + curl -fsSL ${{github.event.issue.pull_request.url}} -H "Authorization: token ${{github.token}}" | jq -r ' + + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + "PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - name: export CC and CXX + run: | + set -xeu #o pipefail + if test $CC = llvm ;then CC=/usr/local/opt/llvm/bin/clang ;fi + echo >>$GITHUB_ENV CC=$CC + echo >>$GITHUB_ENV CXX=$(echo $CC | sed -es,gcc,g++, -es,clang,clang++,) + echo >>$GITHUB_PATH $CCACHE_PATH + ls -lA $CCACHE_PATH + $(realpath $CCACHE_PATH/gcc) --show-config + echo >>$GITHUB_ENV _CCACHE_DIR=$($(realpath $CCACHE_PATH/gcc) --show-config | sed -nE 's,.*cache_dir = ,,p') + echo >>$GITHUB_ENV NPROC=$(nproc | awk '{printf("%.0f",$1*0.77)}') + env: + CC: ${{matrix.CC}} + CCACHE_PATH: /usr/local/opt/ccache/libexec + - name: Restore cache ccache + uses: actions/cache@v2 + with: + path: ${{ env._CCACHE_DIR }} + key: ${{ runner.os }}-ccache + - run: ccache --show-stats | tee /tmp/ccache-stats + - if: ${{false}} ## This works bad when something patched or something updated, seems they does not recalc hash of changed packages. See todos in the begining of file. 
+ ## Read the docs https://vcpkg.readthedocs.io/en/latest/users/binarycaching/ https://github.com/microsoft/vcpkg/blob/master/docs/users/binarycaching.md + name: Restore cache vcpkg + uses: actions/cache@v2 + with: + path: | + ${{ env.HOME }}/.cache/vcpkg/archives + # $HOME/.cache/vcpkg + # build-vcpkg/installed + # build/vcpkg_installed ## This is default folder for manual installation in manifest mode + key: ${{ runner.os }}-${{matrix.CC}}-vcpkg + # key: ${{ runner.os }}-vcpkg-${{matrix.CC}}-${{ hashFiles('build-vcpkg/installed/vcpkg/status') }} + # restore-keys: ${{ runner.os }}-vcpkg-${{matrix.CC}}- + - name: Build iroha vcpkg dependancies + run: ./vcpkg/build_iroha_deps.sh $PWD/build-vcpkg; test -f $PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake + ## Takes 48m16s on default GitHub runner with 2 cores + ## Takes 13m41s on self-hosted AWS EC2 c5.x4large + # ________________________________________________________ + # Executed in 32,08 mins fish external + # usr time 110,52 mins 0,24 millis 110,52 mins + # sys time 12,26 mins 1,34 millis 12,26 mins + # + # All requested packages are currently installed. 
+ # ________________________________________________________ + # Executed in 3,17 secs fish external + # usr time 2,05 secs 128,00 micros 2,05 secs + # sys time 0,70 secs 575,00 micros 0,70 secs + - name: CMake configure + ## Takes 13s on regular GitHub runner + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ matrix.BuildType }} -GNinja ${{ matrix.CMAKE_USE }} -DTESTING=ON -DPACKAGE_DEB=ON + #-DCMAKE_VERBOSE_MAKEFILE=ON + - name: CMake build + run: | + set -x + ## reduce memory usage to do not overflow + cmake --build build --config ${{ matrix.BuildType }} -- -j$(nproc | awk '{printf("%.0f",$1*0.77)}') + ## Debug takes 18m44s on regular GitHub runner + ## Debug takes 7m41s on self-hosted AWS EC2 c5.x4large + ## Release takes 2m58s on self-hosted AWS EC2 c5.x4large + - run: ccache --show-stats | diff --side-by-side /tmp/ccache-stats - ||true + - name: Show free space and disk usage + if: ${{ always() }} + run: | + df -h || true + - run: | + set -x + cc=$(echo $CC | sed -Ee's,[-/],,g' ) + build_type=$(echo $BuildType | tr A-Z a-z | sed -E -es,debug,dbg, -es,release,rel, ) + test $build_type = dbg -o $build_type = rel + uses=$(echo $CMAKE_USE | + tr ' ' '\n' | sed -nE -e's,.*USE_([a-zA-Z]+)=ON.*,\1,gp ; s, ,-, ;' | xargs | tr ' ' - | tr A-Z a-z ) + _os=${os:+-$os} _cc=${cc:+-$cc} _build_type=${build_type:+-$build_type} _uses=${uses:+-$uses} + echo >>$GITHUB_ENV ARTIFACT_SUFFIX=$_os$_cc$_build_type$_uses + echo >>$GITHUB_ENV _uses_suffix=$_uses + echo >>$GITHUB_ENV _compiler_suffix=$(test $cc != gcc9 && echo $_cc) + echo >>$GITHUB_ENV _debug_suffix=$(test "$build_type" = dbg && echo -debug || true) + # echo >>$GITHUB_ENV _os=$_os _cc=$_cc _build_type=$_build_type _uses=$_uses + + name: Generate artifact suffix >> env.ARTIFACT_SUFFIX + #run: inherited + env: + CC: ${{ matrix.CC }} + BuildType: ${{ matrix.BuildType }} + CMAKE_USE: ${{ matrix.CMAKE_USE }} + os: macos ##${{run.os}} + - name: Upload 
artifact irohad + uses: actions/upload-artifact@v2 + with: + name: irohad-macos${{env.ARTIFACT_SUFFIX}} + path: | + build/bin/irohad + build/bin/iroha-cli + - if: ${{ false }} ## Maybe test in another job + name: Upload artifact tests + uses: actions/upload-artifact@v2 + with: + name: iroha-tests-ubuntu${{env.ARTIFACT_SUFFIX}} + path: | + build/test_bin/** + build/test_data/** + - name: Install Postgres on MacOS + run: brew install postgresql + ## ToDo may be optimize, i.e. cache package + - name: CTest + run: | + set -xeuo pipefail + if test $(uname) = Darwin ;then + ## This is a common portable solution, but Debian and Ubuntu have their own wrappers + initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database + postgres -D $PWD/postgres_database -p5432 2>&1 >/tmp/postgres.log & { sleep .3; kill -0 $!; } ## use pg_ctl no need & + else + mkdir postgres_database && chown iroha-ci postgres_database + echo /usr/lib/postgresql/12/bin/initdb --locale=C --encoding=UTF-8 --username=postgres $PWD/postgres_database | su iroha-ci + echo /usr/lib/postgresql/12/bin/pg_ctl start -D $PWD/postgres_database --log=$PWD/postgres_database/log | su iroha-ci + # ## Need to go debian-specific way because + # ## initdb is not allowed to be run as root, but we need to run as root + # ## because GitHub actions runners have much issues with permissions. 
+ # cat </etc/postgresql/12/main/pg_hba.conf + # # TYPE DATABASE USER ADDRESS METHOD + # local all all trust + # host all all 127.0.0.1/32 trust + # host all all ::1/128 trust + # local replication all trust + # host replication all 127.0.0.1/32 trust + # host replication all ::1/128 trust + # END + # pg_ctlcluster 12 main start ## Cluster 'main' exist by default + # #OR pg_createcluster -p 5432 --start 12 iroha -- --locale=C --encoding=UTF-8 --username=postgres + fi + ## Run module_* tests in parallel and others subsequently + cd build + ## FIXME dissallow to fail, remove '||true' after ctest + cat | sort -u >ALLOW_TO_FAIL </dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + defaults: + run: + shell: bash + build-W: + needs: + - prepare-windows-env + - generate_matrixes + runs-on: windows-latest + strategy: + fail-fast: false + matrix: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_windows ) }} + #if: ${{ false }} ##FIXME Somehow cmake fails to find GTest and others + if: ${{ false && ( fromJSON( needs.generate_matrixes.outputs.matrix_windows ).include[0] ) }} + # matrix: + # BuildYype: [ Debug ] #,Release, RelWithDebInfo + defaults: + run: + shell: bash #pwsh + working-directory: 'C:\github\iroha' ## Use disk C: because D: is out of space + steps: + - name: Create working-directory, export WORKDIR + run: | + set -x + mkdir -p "$WORKDIR" + echo $PWD + echo >>$GITHUB_ENV WORKDIR="$WORKDIR" + working-directory: 'C:\' + env: {WORKDIR: 'C:\github\iroha'} + - name: uname in bash + run: uname + shell: bash + - name: uname in [default] pwsh shell + run: uname + shell: pwsh + - name: Chocolatey install + run: choco install cmake ninja #ccache + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, 
else default value github.repository, required by forks + - name: move to workdir + run: | + set -x + echo $PWD + shopt -s dotglob nullglob + mv -vf * -t "$WORKDIR" + working-directory: + #- *step_restore_ccache + #- *step_vcpkg_cache + - name: Build iroha vcpkg dependancies + run: ./vcpkg/build_iroha_deps.sh $PWD/build-vcpkg; test -f $PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake + ## Takes 48m16s on default GitHub runner with 2 cores + ## Takes 13m41s on self-hosted AWS EC2 c5.x4large + # ________________________________________________________ + # Executed in 32,08 mins fish external + # usr time 110,52 mins 0,24 millis 110,52 mins + # sys time 12,26 mins 1,34 millis 12,26 mins + # + # All requested packages are currently installed. + # ________________________________________________________ + # Executed in 3,17 secs fish external + # usr time 2,05 secs 128,00 micros 2,05 secs + # sys time 0,70 secs 575,00 micros 0,70 secs + - name: CMake configure + ## Takes 13s on regular GitHub runner + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/build-vcpkg/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ matrix.BuildType }} -GNinja ${{ matrix.CMAKE_USE }} -DTESTING=ON -DPACKAGE_DEB=ON + #-DCMAKE_VERBOSE_MAKEFILE=ON + - name: CMake build + run: | + set -x + ## reduce memory usage to do not overflow + cmake --build build --config ${{ matrix.BuildType }} -- -j$(nproc | awk '{printf("%.0f",$1*0.77)}') + ## Debug takes 18m44s on regular GitHub runner + ## Debug takes 7m41s on self-hosted AWS EC2 c5.x4large + ## Release takes 2m58s on self-hosted AWS EC2 c5.x4large + - name: Show free space and disk usage + if: ${{ always() }} + run: | + df -h || true + - name: Install Postgres on Windows + run: choco install postgresql + # - *step_ctest + ## Build and publish docker image named 'hyperledger/iroha' with irohad and iroha tools inside. 
+ ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, + ## and conditional tags :edge (for development branches) and :latest (for git-tags) + ## Those docker image tags could be extended with suffixes with compiler and build type like + ## -gcc10, -clang, -debug, -gcc10-debug. + ## Result image name could look like: hyperledger/iroha:pr-1117, hyperledger/iroha-burrow:commit-XXXXX-debug + ## Note: image is push only when DockerHub login-token pair available - not to PRs from forks + docker-R: + needs: + - build-UR + - generate_matrixes + runs-on: [self-hosted, Linux] ## or ubuntu-latest + strategy: + fail-fast: false + matrix: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_release ) }} + if: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_release ).include[0] }} + env: + DOCKERHUB_ORG: hyperledger ## Must be hyperledger, also can use iroha1, cannot use ${{ secrets.DOCKERHUB_ORG }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + IMAGE_NAME: iroha + steps: + - name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: System info + run: | + set -x + whoami + id $(whoami) + free || vm_stat | perl -ne '/page size of (\d+)/ and $size=$1; + /Pages\s+([^:]+)[^\d]+(\d+)/ and printf("%-16s % 16.2f Mi\n", "$1:", $2 * $size / 1048576);' + df -h + - name: Build info + run: | + cat << 'END' + ref:${{github.ref}} + sha:${{github.sha}} + run_number:${{github.run_number}} + event_name:${{github.event_name}} + event.action:${{github.event.action}} + event.issue.number:${{ github.event.issue.number }} + END + - name: REF and SHA of commented PR to ENV + if: github.event.comment + run: > + curl -fsSL ${{github.event.issue.pull_request.url}} -H "Authorization: 
token ${{github.token}}" | jq -r ' + + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + "PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - name: Generate artifact suffix depending on matrix + env: + os: ubuntu ## maybe ${{ run.os }} + CC: ${{ matrix.CC }} + BuildType: ${{ matrix.BuildType }} + CMAKE_USE: ${{ matrix.CMAKE_USE }} + run: | + set -x + cc=$(echo $CC | sed -Ee's,[-/],,g' ) + build_type=$(echo $BuildType | tr A-Z a-z | sed -E -es,debug,dbg, -es,release,rel, ) + test $build_type = dbg -o $build_type = rel + uses=$(echo $CMAKE_USE | + tr ' ' '\n' | sed -nE -e's,.*USE_([a-zA-Z]+)=ON.*,\1,gp ; s, ,-, ;' | xargs | tr ' ' - | tr A-Z a-z ) + _os=${os:+-$os} _cc=${cc:+-$cc} _build_type=${build_type:+-$build_type} _uses=${uses:+-$uses} + echo >>$GITHUB_ENV ARTIFACT_SUFFIX=$_os$_cc$_build_type$_uses + echo >>$GITHUB_ENV _uses_suffix=$_uses + echo >>$GITHUB_ENV _compiler_suffix=$(test $cc != gcc9 && echo $_cc) + echo >>$GITHUB_ENV _debug_suffix=$(test "$build_type" = dbg && echo -debug || true) + # echo >>$GITHUB_ENV _os=$_os _cc=$_cc _build_type=$_build_type _uses=$_uses + - name: Download artifact + uses: actions/download-artifact@v2 + with: + name: iroha-deb${{env.ARTIFACT_SUFFIX}} + - name: Rename artifact debs + run: | + mv *iroha_shepherd.deb docker/release/iroha_shepherd.deb + mv *irohad.deb docker/release/iroha.deb + - name: Determine dockertag + id: dockertag + run: | + echo "::set-output name=dockertag::$dockertag" + echo >>$GITHUB_ENV dockertag=$dockertag + test -n "$DOCKERHUB_ORG" || { + echo ::error::"DOCKERHUB_ORG must contain value" + false + } + env: + dockertag: ${{ hashFiles('docker/release/**') }} + - name: Docker meta + id: meta + uses: docker/metadata-action@v3 + 
with: + tags: | + type=raw,value=${{env.dockertag}} + type=ref,event=branch + type=ref,event=pr + type=ref,event=tag + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=schedule + type=edge,branch=support/1.2.x + type=edge,branch=develop + type=edge,branch=test-ci + type=sha,prefix=commit-,format=short + type=sha,prefix=commit-,format=long + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed + ## - tag 'edge' when branch support/1.2.x is pushed + ## - schedule, see the docs + + images: ${{ env.DOCKERHUB_ORG }}/${{ env.IMAGE_NAME }}${{ env._uses_suffix }} ## uses suffics could be empty, -burrow, -ursa + flavor: suffix=${{env._compiler_suffix}}${{env._debug_suffix}} + #maybetodo flavor: prefix=${{ env.USES_PREFIX }} ## In case creating repository hyperledger/iroha-burrow denied, Use tag prefix hyperledger/iroha:burrow-xxxx + - name: Login to DockerHub + if: ${{ env.DOCKERHUB_TOKEN != '' && env.DOCKERHUB_USERNAME != '' }} + id: docker_login + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Possible WARNING + if: ${{ steps.docker_login.outcome == 'skipped' }} + run: echo "::warning::DOCKERHUB_TOKEN and DOCKERHUB_USERNAME are empty. Will build but NOT push." 
+ - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-release-${{env.dockertag}} + restore-keys: ${{ runner.os }}-buildx-release + - id: build_and_push + name: Build and push + uses: docker/build-push-action@v2 + with: + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + context: docker/release/ + push: ${{ steps.docker_login.outcome == 'success' && ( matrix.dockerpush == '' || matrix.dockerpush == 'yes' ) }} + - # Temp fix + # https://github.com/docker/build-push-action/issues/252 + # https://github.com/moby/buildkit/issues/1896 + name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + docker-D: + runs-on: [self-hosted, Linux] ## or ubuntu-latest + env: + DOCKERHUB_ORG: hyperledger ## Must be hyperledger, also can use iroha1, cannot use ${{ secrets.DOCKERHUB_ORG }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + IMAGE_NAME: iroha + steps: + - name: Show context + run: | + echo "::group::GitHub context" + cat >/dev/null <<'END' + ${{ toJson(github) }} + END + echo "::endgroup::" + echo "::group::GitHub needs" + cat >/dev/null <<'END' + ${{ toJson(needs) }} + END + echo "::endgroup::" + - name: System info + run: | + set -x + whoami + id $(whoami) + free || vm_stat | perl -ne '/page size of (\d+)/ and $size=$1; + /Pages\s+([^:]+)[^\d]+(\d+)/ and printf("%-16s % 16.2f Mi\n", "$1:", $2 * $size / 1048576);' + df -h + - name: Build info + run: | + cat << 'END' + ref:${{github.ref}} + sha:${{github.sha}} + run_number:${{github.run_number}} + event_name:${{github.event_name}} + event.action:${{github.event.action}} + event.issue.number:${{ github.event.issue.number }} + END + - name: REF 
and SHA of commented PR to ENV + if: github.event.comment + run: > + curl -fsSL ${{github.event.issue.pull_request.url}} -H "Authorization: token ${{github.token}}" | jq -r ' + + "PR_REF="+.head.ref, + "PR_SHA="+.head.sha, + "PR_NUM="+(.number|tostring), + "PR_REPO="+.head.repo.full_name' >>$GITHUB_ENV + - name: Checkout + uses: actions/checkout@v2 + with: + ref: ${{env.PR_REF}} ## not empty on issue_comment, else default value GITHUB_REF + repository: ${{env.PR_REPO}} ## not empty on issue_comment, else default value github.repository, required by forks + - name: Generate artifact suffix depending on matrix + env: + os: ubuntu ## maybe ${{ run.os }} + CC: ${{ matrix.CC }} + BuildType: ${{ matrix.BuildType }} + CMAKE_USE: ${{ matrix.CMAKE_USE }} + run: | + set -x + cc=$(echo $CC | sed -Ee's,[-/],,g' ) + build_type=$(echo $BuildType | tr A-Z a-z | sed -E -es,debug,dbg, -es,release,rel, ) + test $build_type = dbg -o $build_type = rel + uses=$(echo $CMAKE_USE | + tr ' ' '\n' | sed -nE -e's,.*USE_([a-zA-Z]+)=ON.*,\1,gp ; s, ,-, ;' | xargs | tr ' ' - | tr A-Z a-z ) + _os=${os:+-$os} _cc=${cc:+-$cc} _build_type=${build_type:+-$build_type} _uses=${uses:+-$uses} + echo >>$GITHUB_ENV ARTIFACT_SUFFIX=$_os$_cc$_build_type$_uses + echo >>$GITHUB_ENV _uses_suffix=$_uses + echo >>$GITHUB_ENV _compiler_suffix=$(test $cc != gcc9 && echo $_cc) + echo >>$GITHUB_ENV _debug_suffix=$(test "$build_type" = dbg && echo -debug || true) + # echo >>$GITHUB_ENV _os=$_os _cc=$_cc _build_type=$_build_type _uses=$_uses + - name: Download artifact + uses: actions/download-artifact@v2 + with: + name: iroha-deb${{env.ARTIFACT_SUFFIX}} + - name: Rename artifact debs + run: | + mv *iroha_shepherd.deb docker/release/iroha_shepherd.deb + mv *irohad.deb docker/release/iroha.deb + - name: Determine dockertag + id: dockertag + run: | + echo "::set-output name=dockertag::$dockertag" + echo >>$GITHUB_ENV dockertag=$dockertag + test -n "$DOCKERHUB_ORG" || { + echo ::error::"DOCKERHUB_ORG must contain value" 
+ false + } + env: + dockertag: ${{ hashFiles('docker/release/**') }} + - name: Docker meta + id: meta + uses: docker/metadata-action@v3 + with: + tags: | + type=raw,value=${{env.dockertag}} + type=ref,event=branch + type=ref,event=pr + type=ref,event=tag + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=schedule + type=edge,branch=support/1.2.x + type=edge,branch=develop + type=edge,branch=test-ci + type=sha,prefix=commit-,format=short + type=sha,prefix=commit-,format=long + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed + ## - tag 'edge' when branch support/1.2.x is pushed + ## - schedule, see the docs + + images: ${{ env.DOCKERHUB_ORG }}/${{ env.IMAGE_NAME }}${{ env._uses_suffix }} ## uses suffics could be empty, -burrow, -ursa + flavor: suffix=${{env._compiler_suffix}}${{env._debug_suffix}} + #maybetodo flavor: prefix=${{ env.USES_PREFIX }} ## In case creating repository hyperledger/iroha-burrow denied, Use tag prefix hyperledger/iroha:burrow-xxxx + - name: Login to DockerHub + if: ${{ env.DOCKERHUB_TOKEN != '' && env.DOCKERHUB_USERNAME != '' }} + id: docker_login + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Possible WARNING + if: ${{ steps.docker_login.outcome == 'skipped' }} + run: echo "::warning::DOCKERHUB_TOKEN and DOCKERHUB_USERNAME are empty. Will build but NOT push." 
+ - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-release-${{env.dockertag}} + restore-keys: ${{ runner.os }}-buildx-release + - id: build_and_push + name: Build and push + uses: docker/build-push-action@v2 + with: + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + context: docker/release/ + push: ${{ steps.docker_login.outcome == 'success' && ( matrix.dockerpush == '' || matrix.dockerpush == 'yes' ) }} + - # Temp fix + # https://github.com/docker/build-push-action/issues/252 + # https://github.com/moby/buildkit/issues/1896 + name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + needs: + - build-UD + - generate_matrixes + strategy: + fail-fast: false + matrix: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_debug ) }} + if: ${{ fromJSON( needs.generate_matrixes.outputs.matrix_ubuntu_debug ).include[0] }} + # env: + # <<: *env_dockerhub_release + # IMAGE_NAME: iroha-debug diff --git a/.github/workflows/repolinter.yml b/.github/workflows/repolinter.yml new file mode 100644 index 00000000000..0f956ea927c --- /dev/null +++ b/.github/workflows/repolinter.yml @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: Apache-2.0 +# Hyperledger Repolinter Action +name: Repolinter + +on: + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + container: hyperledger-tools.jfrog.io/repolinter:0.10.0 + steps: + - name: Checkout Code + uses: actions/checkout@v2 + - name: Lint Repo + continue-on-error: true + run: bundle exec /app/bin/repolinter.js --rulesetUrl https://raw.githubusercontent.com/hyperledger-labs/hyperledger-community-management-tools/master/repo_structure/repolint.json --format markdown > /repolinter-report.md + - name: Save 
repolinter-report file + uses: actions/upload-artifact@v2 + with: + name: repolinter-report + path: /repolinter-report.md diff --git a/.jenkinsci/build.groovy b/.jenkinsci/build.groovy index 60bb724f245..86c1bad47ee 100644 --- a/.jenkinsci/build.groovy +++ b/.jenkinsci/build.groovy @@ -50,7 +50,7 @@ def sonarScanner(scmVars, environment) { -Dsonar.projectVersion=${BUILD_TAG} \ -Dsonar.github.oauth=${SORABOT_TOKEN} ${sonar_option} """ - if (scmVars.GIT_BRANCH == "master" ) + if (scmVars.GIT_BRANCH == "main" ) // push analysis results to sonar sh """ sonar-scanner \ diff --git a/.jenkinsci/builders/x64-linux-build-steps.groovy b/.jenkinsci/builders/x64-linux-build-steps.groovy index 7efc1928822..afc330495fb 100644 --- a/.jenkinsci/builders/x64-linux-build-steps.groovy +++ b/.jenkinsci/builders/x64-linux-build-steps.groovy @@ -83,7 +83,7 @@ def buildSteps(int parallelism, String compiler, String build_type, boolean buil """ postgresIP = sh(script: "docker inspect ${env.IROHA_POSTGRES_HOST} --format '{{ (index .NetworkSettings.Networks \"${env.IROHA_NETWORK}\").IPAddress }}'", returnStdout: true).trim() - def referenceBranchOrCommit = 'master' + def referenceBranchOrCommit = 'main' if (scmVars.GIT_LOCAL_BRANCH == referenceBranchOrCommit && scmVars.GIT_PREVIOUS_COMMIT) { referenceBranchOrCommit = scmVars.GIT_PREVIOUS_COMMIT } diff --git a/.jenkinsci/text-variables.groovy b/.jenkinsci/text-variables.groovy index 22faad61fef..c44acb716bf 100644 --- a/.jenkinsci/text-variables.groovy +++ b/.jenkinsci/text-variables.groovy @@ -202,7 +202,7 @@ cmd_description = """
  • -

    doxygen = false (or = true if master )

    +

    doxygen = false (or = true if main )

    • Build doxygen, docs will be uploaded to jenkins,

      @@ -246,7 +246,7 @@ cmd_description = """
  • -

    pushDockerTag = 'not-supposed-to-be-pushed'(or = master if master)

    +

    pushDockerTag = 'not-supposed-to-be-pushed'(or = main if main)

    • if packagePush=true it is the name of the docker tag that will be pushed

      @@ -257,7 +257,7 @@ cmd_description = """
  • -

    packagePush = false (or = true if master )

    +

    packagePush = false (or = true if main )

    • push all packages and docker to the artifactory and docker hub

      @@ -312,7 +312,7 @@ cmd_description = """
  • -

    specialBranch = false (or = true if master ),

    +

    specialBranch = false (or = true if main ),

    • Not recommended to set; it is used to decide whether to push doxygen and iroha:develop-build, and forces build_type = 'Release'

      diff --git a/.packer/README.md b/.packer/README.md index 015a67e06cd..7d9dcd95fb1 100644 --- a/.packer/README.md +++ b/.packer/README.md @@ -1,7 +1,7 @@ ## Quick Start ``` cd win/ -packer build -var 'windows_password=' -var 'security_group_id=' -var 'iroha_repo=https://github.com/hyperledger/iroha.git' -var 'iroha_branches=master, support/1.1.x' windows-build-server.json +packer build -var 'windows_password=' -var 'security_group_id=' -var 'iroha_repo=https://github.com/hyperledger/iroha.git' -var 'iroha_branches=main, support/1.1.x' windows-build-server.json ``` Where : diff --git a/.packer/win/scripts/vcpkg.ps1 b/.packer/win/scripts/vcpkg.ps1 index 0991c5968a9..405aa441fbb 100644 --- a/.packer/win/scripts/vcpkg.ps1 +++ b/.packer/win/scripts/vcpkg.ps1 @@ -19,7 +19,7 @@ Invoke-Expression "$vcpkg_path\bootstrap-vcpkg.bat" echo "Installing vcpkg packages" Invoke-Expression "$vcpkg_path\vcpkg.exe install --triplet x64-windows (Get-Content -Path $iroha_vcpkg_path\VCPKG_DEPS_LIST)" -Invoke-Expression "$vcpkg_path\vcpkg.exe install --triplet x64-windows --head (Get-Content -Path $iroha_vcpkg_path\VCPKG_HEAD_DEPS_LIST)" +#Invoke-Expression "$vcpkg_path\vcpkg.exe install --triplet x64-windows --head (Get-Content -Path $iroha_vcpkg_path\VCPKG_HEAD_DEPS_LIST)" echo "Run vcpkg.exe integrate install" Invoke-Expression "$vcpkg_path\vcpkg.exe integrate install" diff --git a/.packer/win/scripts/vcpkg_for_multiple_branch.ps1 b/.packer/win/scripts/vcpkg_for_multiple_branch.ps1 index c7121686e4a..3d4b11d25c6 100644 --- a/.packer/win/scripts/vcpkg_for_multiple_branch.ps1 +++ b/.packer/win/scripts/vcpkg_for_multiple_branch.ps1 @@ -1,10 +1,10 @@ # This script runs vcpkg.ps1 multiple time with different iroha branches # It is very helpful then you need setup multiple vcpkg on windows build agent, -# for example build master and develop from one ami +# for example build main and develop from one ami param( [string] $iroha_repo = 
"https://github.com/hyperledger/iroha.git", - [array] $branches = "master" + [array] $branches = "main" ) $ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue'; diff --git a/.packer/win/windows-build-server.json b/.packer/win/windows-build-server.json index 2b0bdae023b..36cab89846d 100644 --- a/.packer/win/windows-build-server.json +++ b/.packer/win/windows-build-server.json @@ -34,7 +34,7 @@ "launch_block_device_mappings": [ { "device_name": "/dev/sda1", - "volume_size": 80, + "volume_size": 240, "volume_type": "gp2", "delete_on_termination": true } diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..6a75bd175ab --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,22 @@ +repos: +- repo: local + hooks: + - id: make-workflows + name: Make GitHub workflows from *.src.yml + entry: bash -c '.github/make-workflows.sh && git add .github/workflows' + language: system + types: [yaml] + pass_filenames: false + +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.3.0 + hooks: + - id: check-yaml + # - id: check-json + # - id: check-xml + - id: check-merge-conflict + - id: check-added-large-files + - id: end-of-file-fixer + exclude: '.*\.patch$' + - id: trailing-whitespace + exclude: '.*\.patch$' diff --git a/CMakeLists.txt b/CMakeLists.txt index 2db43346d74..2f368d61dfc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,6 +25,7 @@ else() endif() SET(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1) SET(CMAKE_INSTALL_RPATH "../lib") +SET(CMAKE_EXPORT_COMPILE_COMMANDS ON) ## use for code static analyze and vscode cpp plugin if(CMAKE_BUILD_TYPE MATCHES "Release") include(CheckIPOSupported) @@ -38,6 +39,11 @@ if(CMAKE_BUILD_TYPE MATCHES "Release") endif() endif() +## FIXME revert this after change CI to GitHub actions. 
+## Temporal fix for 'bytecode stream version incompatible' between gcc-9 and gcc-10 and clang +## when dependancies were build via vcpkg with default GCC9 could not be linked with iroha built with GCC-10 +set(CMAKE_INTERPROCEDURAL_OPTIMIZATION FALSE) + if(WIN32) # We have to set _WIN32_WINNT for gRPC if(${CMAKE_SYSTEM_VERSION} EQUAL 10) # Windows 10 @@ -148,15 +154,6 @@ else() set(MAKE make) endif() -if(PACKAGE_TGZ OR PACKAGE_ZIP OR PACKAGE_RPM OR PACKAGE_DEB) - message(STATUS "Packaging enabled: forcing non-packaging options to OFF") - set(BENCHMARKING OFF) - set(TESTING OFF) - set(USE_BTF OFF) - set(COVERAGE OFF) - set(FUZZING OFF) -endif() - message(STATUS "-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}") message(STATUS "-DTESTING=${TESTING}") message(STATUS "-DUSE_BTF=${USE_BTF}") diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index cfeddb788ec..496ba4c0366 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -21,10 +21,12 @@ Reporting Bugs to produce an incorrect or unexpected result, or to behave in unintended ways. -Bugs are tracked as `JIRA -issues `__ +Bugs are tracked as `GitHub issues ` (this is the preferred option) or as `JIRA +issues ` (if it is convenient to you)__ in Hyperledger Jira. +If you decide to go with the GitHub issues, just `click on this link ` and follow the instructions in the template. + To submit a bug, `create new issue `__ and include these details: @@ -75,9 +77,12 @@ Suggesting Improvements An *improvement* is a code or idea, which makes **existing** code or design faster, more stable, portable, secure or better in any other way. -Improvements are tracked as `JIRA +Improvements are tracked as `GitHub issues ` (this is the preferred option) or as `JIRA improvements `__. -To submit new improvement, `create new + +Again, if you choose GitHub issues, just `click on this link ` and follow the instructions in the template. 
+ +To submit a new improvement in JIRA, `create new issue `__ and include these details: @@ -113,9 +118,8 @@ community could help you. You can also help others! Your First Code Contribution ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Read our `C++ Style Guide <#c-style-guide>`__ and start with -beginner-friendly issues with JIRA label -`good-first-issue `__. +Read our `C++ Style Guide <#c-style-guide>`__ and start with checking out `the GitHub board ` or the beginner-friendly issues in JIRA with +`good-first-issue label `__. Indicate somehow that you are working on this task: get in touch with maintainers team, community or simply assign this issue to yourself. diff --git a/Jenkinsfile b/Jenkinsfile index 3ad5b97f118..afd209738d2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -170,7 +170,7 @@ node ('master') { use_burrow = false forceDockerDevelopBuild = false - if (scmVars.GIT_LOCAL_BRANCH in ["master"] || env.TAG_NAME ) + if (scmVars.GIT_LOCAL_BRANCH in ["main"] || env.TAG_NAME ) specialBranch = true else specialBranch = false @@ -184,8 +184,8 @@ node ('master') { use_burrow = true } - if (scmVars.GIT_LOCAL_BRANCH == "master") - pushDockerTag = 'master' + if (scmVars.GIT_LOCAL_BRANCH == "main") + pushDockerTag = 'main' else if (env.TAG_NAME) pushDockerTag = env.TAG_NAME else @@ -210,7 +210,8 @@ node ('master') { break; case 'On open PR': // Just hint, not the main way to Notify about build status. 
- gitNotify ("Jenkins: Merge to trunk", "Please, run: 'Before merge to trunk'", 'PENDING', env.JOB_URL + "/build") + ///Disable yellow status @kuvaldini @BulatSaif https://github.com/hyperledger/iroha/pull/1028#issuecomment-872080478 + //gitNotify ("Jenkins: Merge to trunk", "Please, run: 'Before merge to trunk'", 'PENDING', env.JOB_URL + "/build") mac_compiler_list = ['appleclang'] win_compiler_list = ['msvc'] testList = '()' @@ -220,7 +221,8 @@ node ('master') { codestyle = true break; case 'Commit in Open PR': - gitNotify ("Jenkins: Merge to trunk", "Please, run: 'Before merge to trunk'", 'PENDING', env.JOB_URL + "/build") + ///Disable yellow status @kuvaldini @BulatSaif https://github.com/hyperledger/iroha/pull/1028#issuecomment-872080478 + // gitNotify ("Jenkins: Merge to trunk", "Please, run: 'Before merge to trunk'", 'PENDING', env.JOB_URL + "/build") echo "All Default" break; case 'Before merge to trunk': @@ -346,7 +348,7 @@ node ('master') { x64LinuxAlwaysPostSteps, "x86_64 Linux ${build_type} ${compiler}", x64LinuxWorker, tasks) } } - // If "master" also run Release build + // If "main" also run Release build if (release_build){ registerBuildSteps([{x64LinuxBuildScript.buildSteps( current_parallelism, first_compiler, 'Release', build_shared_libs, specialBranch, /*coverage*/false, @@ -354,7 +356,7 @@ node ('master') { /*fuzzing*/false, /*benchmarking*/false, /*coredumps*/false, /*use_btf*/false, use_libursa, use_burrow, /*force_docker_develop_build*/false, /*manifest_push*/false, environmentList)}], x64LinuxPostSteps, "x86_64 Linux Release ${first_compiler}", x64LinuxWorker, tasks) - // will not be executed in usual case, because x64linux_compiler_list = ['gcc9'] for master branch or tags + // will not be executed in usual case, because x64linux_compiler_list = ['gcc9'] for main branch or tags if (x64linux_compiler_list.size() > 1){ x64linux_compiler_list[1..-1].each { compiler -> registerBuildSteps([{x64LinuxBuildScript.buildSteps( @@ 
-393,7 +395,7 @@ node ('master') { def s390xLinuxBuildSteps def s390xLinuxPostSteps = new Builder.PostSteps() - if(!s390xlinux_compiler_list.isEmpty()){ + if(false && !s390xlinux_compiler_list.isEmpty()){ s390xLinuxAlwaysPostSteps = new Builder.PostSteps( always: [{x64LinuxBuildScript.alwaysPostSteps(scmVars, environmentList, coredumps)}]) s390xLinuxPostSteps = new Builder.PostSteps( @@ -420,7 +422,7 @@ node ('master') { s390xLinuxAlwaysPostSteps, "s390x Linux ${build_type} ${compiler}", s390xLinuxWorker, tasks) } } - // If "master" also run Release build + // If "main" also run Release build if (release_build){ registerBuildSteps([{x64LinuxBuildScript.buildSteps( current_parallelism, first_compiler, 'Release', build_shared_libs, specialBranch, /*coverage_s390x*/false, @@ -446,7 +448,7 @@ node ('master') { mac_compiler_list, build_type, coverage_mac, testing, testList, packageBuild, fuzzing, benchmarking, useBTF, use_libursa, use_burrow, environmentList)}], release_build ? x64MacAlwaysPostSteps : x64MacPostSteps, "Mac ${build_type}", x64MacWorker, tasks) - //If "master" also run Release build + //If "main" also run Release build if (release_build) { registerBuildSteps([{x64BuildScript.buildSteps(current_parallelism, mac_compiler_list, 'Release', /*coverage_mac*/false, /*testing*/false, testList, /*packageBuild*/true, /*fuzzing*/false, /*benchmarking*/false, /*use_btf*/false, diff --git a/clean.sh b/clean.sh deleted file mode 100755 index 31b97c825ff..00000000000 --- a/clean.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash - -rm -rf build -rm -rf cmake-build-debug diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake index d14741d4e94..faed50875f8 100644 --- a/cmake/dependencies.cmake +++ b/cmake/dependencies.cmake @@ -95,11 +95,6 @@ find_package(gflags 2.2.2 REQUIRED CONFIG) ########################## find_package(rxcpp) -########################## -# TBB # -########################## -find_package(TBB REQUIRED CONFIG) - ########################## # 
boost # ########################## @@ -126,3 +121,14 @@ find_package(ed25519 REQUIRED CONFIG) # fmt # ################################### find_package(fmt 5.3.0 REQUIRED CONFIG) + +################################### +# prometheus-cpp # +################################### +find_package(prometheus-cpp REQUIRED CONFIG) +find_package(civetweb CONFIG REQUIRED) + +################################### +# rocksdb # +################################### +find_package(RocksDB CONFIG REQUIRED) diff --git a/cmake/functions.cmake b/cmake/functions.cmake index 8b51f304ee3..9acecec2da4 100644 --- a/cmake/functions.cmake +++ b/cmake/functions.cmake @@ -8,11 +8,12 @@ function(strictmode target) ) # Enable more warnings and turn them into compile errors. if ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR - (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR - (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")) - target_compile_options(${target} PRIVATE -Wall -Wpedantic -Werror -Wno-potentially-evaluated-expression) - elseif ((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") OR - (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")) + (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR + (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")) + #target_compile_options(${target} PRIVATE -Wall -Wpedantic -Werror -Wno-potentially-/evaluated-expression) + #target_compile_options(${test_name} PRIVATE -Wno-inconsistent-missing-override -Wno-gnu-zero-variadic-macro-arguments) + elseif ((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") OR + (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")) target_compile_options(${target} PRIVATE /W3 /WX) else () message(AUTHOR_WARNING "Unknown compiler: building target ${target} with default options") @@ -42,15 +43,6 @@ function(addtest test_name SOURCES) # protobuf generates warnings at the moment strictmode(${test_name}) endif () - if ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") OR - (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR - (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")) - target_compile_options(${test_name} PRIVATE 
-Wno-inconsistent-missing-override -Wno-gnu-zero-variadic-macro-arguments) - elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # do nothing, but also don't spam warning on each test - else () - message(AUTHOR_WARNING "Unknown compiler: building target ${target} with default options") - endif () endfunction() # Creates benchmark "bench_name", with "SOURCES" (use string as second argument) diff --git a/docker/develop/Dockerfile b/docker/develop/Dockerfile index 740306ac7d6..b35f2a2293d 100644 --- a/docker/develop/Dockerfile +++ b/docker/develop/Dockerfile @@ -46,16 +46,21 @@ RUN mkdir ${GOPATH} ENV PATH=${PATH}:/opt/go/bin:${GOPATH}/bin RUN go get github.com/golang/protobuf/protoc-gen-go +## pip3 contains fresher versions of packages than apt +RUN pip3 install --no-cache-dir cmake ninja + # install dependencies COPY vcpkg /tmp/vcpkg-vars -RUN set -e; \ - export VCPKG_FORCE_SYSTEM_BINARIES=1; \ - sh /tmp/vcpkg-vars/build_iroha_deps.sh /tmp/vcpkg /tmp/vcpkg-vars; \ +RUN set -xe; \ + case "$(uname -m)" in arm*|s390*|ppc64*) export VCPKG_FORCE_SYSTEM_BINARIES=1 ;; esac; \ + /tmp/vcpkg-vars/build_iroha_deps.sh /tmp/vcpkg /tmp/vcpkg-vars; \ /tmp/vcpkg/vcpkg export $(/tmp/vcpkg/vcpkg list | cut -d':' -f1 | tr '\n' ' ') --raw --output=dependencies; \ mv /tmp/vcpkg/dependencies /opt/dependencies; \ chmod +x /opt/dependencies/installed/*/tools/protobuf/protoc*; \ unset VCPKG_FORCE_SYSTEM_BINARIES; \ rm -rf /tmp/vcpkg* + ##NOTE Newer packages like libpq may require newer cmake version, and will fail with no sanity error message when VCPKG_FORCE_SYSTEM_BINARIES=1; + ##NOTE But vcpkg on platforms arm,s390,ppc64 require VCPKG_FORCE_SYSTEM_BINARIES=1 # install sonar cli ENV SONAR_CLI_VERSION=3.3.0.1492 diff --git a/docker/iroha-builder/Dockerfile b/docker/iroha-builder/Dockerfile new file mode 100644 index 00000000000..ed3671c442d --- /dev/null +++ b/docker/iroha-builder/Dockerfile @@ -0,0 +1,81 @@ +FROM ubuntu:20.04 + +# number of concurrent threads during build +# usage: 
docker build --build-arg PARALLELISM=8 -t name/name . +ARG PARALLELISM=1 + +ENV IROHA_HOME /opt/iroha +ENV IROHA_BUILD /opt/iroha/build + +ARG DEBIAN_FRONTEND=noninteractive +RUN apt-get update && \ + apt-get -y --no-install-recommends install \ + apt-utils software-properties-common wget gpg-agent \ + libtool \ + # compilers (gcc-9, gcc-10) + build-essential g++-9 g++-10 cmake ninja-build \ + gdb gdbserver \ + # CI dependencies + git ssh tar gzip ca-certificates gnupg \ + # code coverage + lcov \ + # Python3 + python3-dev python3-pip python-is-python3 \ + # other + curl file ccache libssl-dev \ + gcovr cppcheck doxygen rsync graphviz graphviz-dev vim zip unzip pkg-config \ + jq \ + postgresql postgresql-contrib; \ + if [ $(uname -m) = "x86_64" ] ;then \ + apt-get -y --no-install-recommends install \ + clang-10 lldb-10 lld-10 libc++-10-dev libc++abi-10-dev clang-format; \ + fi; \ + apt-get -y clean && \ + rm -rf /var/lib/apt/lists/* && \ + rm -rf /var/cache/apt/archives/* + +RUN cd /opt/ && \ + git clone https://github.com/sobolevn/git-secret.git git-secret && \ + cd git-secret && make build && \ + PREFIX="/usr/local" make install + +# golang stuff for iroha+burrow +RUN curl -fL https://golang.org/dl/go1.14.2.linux-$(dpkg --print-architecture).tar.gz | tar -C /opt -xz +ENV GOPATH=/opt/gopath +RUN mkdir ${GOPATH} +ENV PATH=${PATH}:/opt/go/bin:${GOPATH}/bin +RUN go get github.com/golang/protobuf/protoc-gen-go + +## Allow access to database, trust local connections +# RUN sed -i /etc/postgresql/12/main/pg_hba.conf -Ee's,(^local\s+all\s+postgres\s+)\w+,\1trust,' +RUN echo " \n\ +# TYPE DATABASE USER ADDRESS METHOD \n\ +local all all trust \n\ +host all all 127.0.0.1/32 trust \n\ +host all all ::1/128 trust \n\ +local replication all trust \n\ +host replication all 127.0.0.1/32 trust \n\ +host replication all ::1/128 trust \n\ +" > /etc/postgresql/12/main/pg_hba.conf + +ENV PATH=${PATH}:/usr/lib/postgresql/12/bin/ + +## non-interactive adduser +## -m = 
create home dir +## -s = set default shell +## iroha-ci = username +## -u = userid, default for Ubuntu is 1000 +## -U = create a group same as username +## no password +RUN useradd -ms /bin/bash iroha-ci -u 1000 -U && \ + usermod -aG postgres iroha-ci + +WORKDIR /opt/iroha +RUN set -e; \ + chown -R iroha-ci:iroha-ci /opt/iroha; \ + chmod -R 777 /opt/iroha; \ + mkdir -p /tmp/ccache -m 777; \ + ccache --clear + +USER iroha-ci +# CMD /bin/bash diff --git a/docker/release/wait-for-it.sh b/docker/release/wait-for-it.sh index bbe404324bc..c5b4532aacf 100755 --- a/docker/release/wait-for-it.sh +++ b/docker/release/wait-for-it.sh @@ -53,10 +53,9 @@ wait_for_wrapper() { # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 if [[ $QUIET -eq 1 ]]; then - timeout $BUSYTIMEFLAG $TIMEOUT $0 --quiet --child --host=$HOST --port=$PORT --timeout=$TIMEOUT & - else - timeout $BUSYTIMEFLAG $TIMEOUT $0 --child --host=$HOST --port=$PORT --timeout=$TIMEOUT & + local quiet=--quiet fi + timeout $BUSYTIMEFLAG $TIMEOUT $0 $quiet --child --host=$HOST --port=$PORT --timeout=$TIMEOUT & PID=$! trap "kill -INT -$PID" INT wait $PID @@ -141,7 +140,6 @@ STRICT=${STRICT:-0} CHILD=${CHILD:-0} QUIET=${QUIET:-0} -# check to see if timeout is from busybox? # check to see if timeout is from busybox? TIMEOUT_PATH=$(realpath $(which timeout)) if [[ $TIMEOUT_PATH =~ "busybox" ]]; then @@ -154,8 +152,7 @@ fi if [[ $CHILD -gt 0 ]]; then wait_for - RESULT=$? - exit $RESULT + exit $? else if [[ $TIMEOUT -gt 0 ]]; then wait_for_wrapper diff --git a/docs/source/conf.py b/docs/source/conf.py index 474a9134b4f..eabeb722e7a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -19,7 +19,7 @@ # # needs_sphinx = '1.0' -# Add any Sphinx extension module names here, as strings. They can be +# Add any Sphinx extension module names here, as strm2rngs. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ @@ -29,8 +29,9 @@ 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', - 'm2r', - 'sphinx_extensions.iroha_permissions' + 'm2r2', + 'sphinx_extensions.iroha_permissions', + "sphinxext.remoteliteralinclude" ] html_static_path = ['_static'] diff --git a/docs/source/configure/index.rst b/docs/source/configure/index.rst index 22fe493b026..b277fe8b3ca 100755 --- a/docs/source/configure/index.rst +++ b/docs/source/configure/index.rst @@ -14,40 +14,12 @@ Some configuration parameters must be the same in all the nodes (they are marked Let's take a look at ``example/config.sample`` -.. note:: Starting with v1.2 ``irohad`` can also be configured via environment variables, not only via config file. We will start with looking at config file and then look at how Iroha can be configured with `environment parameters <#environment-variables>`_. +.. note:: Starting with v1.2 ``irohad`` can also be configured via environment variables, not only via config file. +We will start with looking at config file and then look at how Iroha can be configured with +`environment parameters <#environment-variables>`_. -.. code-block:: javascript - :linenos: - - { - "block_store_path": "/tmp/block_store/", - "torii_port": 50051, - "torii_tls_params": { - "port": 55552, - "key_pair_path": "/path/to/the/keypair" - }, - "internal_port": 10001, - "pg_opt": "host=localhost port=5432 user=postgres password=mysecretpassword dbname=iroha", - "database": { - "host": "localhost", - "port": 5432, - "user": "postgres", - "password": "mysecretpassword", - "working database": "iroha_data", - "maintenance database": "postgres" - }, - "max_proposal_size": 10, - "proposal_delay": 5000, - "vote_delay": 5000, - "mst_enable" : false, - "mst_expiration_time" : 1440, - "max_rounds_delay": 3000, - "stale_stream_max_rounds": 2, - "utility_service": { - "ip": "127.0.0.1", - "port": 11001 - } - } +.. 
literalinclude:: ../../../example/config.sample + :language: json As you can see, configuration file is a valid ``json`` structure. Let's go line-by-line and understand what every parameter means in configuration file format. @@ -55,13 +27,13 @@ Let's go line-by-line and understand what every parameter means in configuration Deployment-specific parameters ============================== -- ``block_store_path`` sets path to the folder where blocks are stored. +- ``block_store_path`` (optional) sets path to the folder where blocks are stored. If this parameter is not specified, blocks will be stored in the database. - ``torii_port`` sets the port for external communications. Queries and transactions are sent here. - ``internal_port`` sets the port for internal communications: ordering service, consensus and block loader. - ``database`` (optional) is used to set the database configuration (see below) -- ``pg_opt`` (optional) is a deprecated way of setting credentials of PostgreSQL: +- ``pg_opt`` (optional) is a **deprecated** way of setting credentials of PostgreSQL: hostname, port, username, password and database name. All data except the database name are mandatory. If database name is not provided, the default one gets used, which is ``iroha_default``. @@ -69,7 +41,11 @@ Deployment-specific parameters (see below). - ``utility_service`` (optional) endpoint for maintenance tasks. If present, must include ``ip`` address and ``port`` to bind to. - See `shepherd docs <../maintenance/shepherd.html>` for an example usage of maintenance endpoint. + See `shepherd docs <../maintenance/shepherd.html>`_ for an example usage of maintenance endpoint. +- ``metrics`` (optional) endpoint to monitor iroha's metrics. Prometheus HTTP server listens on this endpoint. + If present, must correspond format "[addr]:" and could be for example "127.0.0.1:8080", "9090", or ":1234". + Wrong values implicitly disables Prometheus metrics server. 
There are also cmdline options ```--metrics_port`` and + ``--metrics_addr`` to override this parameter. There is also an optional ``torii_tls_params`` parameter, which could be included in the config to enable TLS support for client communication. @@ -107,22 +83,31 @@ Environment-specific parameters value you define the size of potential block. For a starter you can stick to ``10``. However, we recommend to increase this number if you have a lot of transactions per second. + **This parameter affects performance.** Increase this parameter, if your network has a big number of transactions going. If you increase ``max_proposal_size`` due to an inreased throughput, you can increase it independently. But if the speed stays approximately the same, you need to also increase ``proposal_delay`` to allow all these transactions to get into this one big proposal. By increasing this parameter you can improve the performance but note that at some point increasing this value can lead to degradation of the performance. + + - ``proposal_delay`` is a timeout in milliseconds that a peer waits a response from the orderding service with a proposal. + **This parameter affects performance.** If you want bigger proposal size, you will need to give the system time to collect this increased number of transactions into one proposal. + - ``vote_delay`` \* is a waiting time in milliseconds before sending vote to the next peer. Optimal value depends heavily on the amount of Iroha peers in the network (higher amount of nodes requires longer ``vote_delay``). We recommend to start with 100-1000 milliseconds. + **This parameter only affects consensus mechanism.** If your network is fast - you are good and this parameter does not effect your network much. But if your network is on a slower side, increase it to give more time for the peers to respond. + - ``mst_enable`` enables or disables multisignature transaction network transport in Iroha. 
Note that MST engine always works for any peer even when the flag is set to ``false``. The flag only allows sharing information about MST transactions among the peers. + - ``mst_expiration_time`` is an optional parameter specifying the time period in which a not fully signed transaction (or a batch) is considered expired (in minutes). The default value is 1440. + - ``max_rounds_delay`` \* is an optional parameter specifying the maximum delay between two consensus rounds (in milliseconds). The default value is 3000. @@ -133,6 +118,8 @@ Environment-specific parameters This parameter allows users to find an optimal value in a tradeoff between resource consumption and the delay of getting back to work after an idle period. + **This parameter affects resource consumption.** When you can expect Iroha to stay idle for longer periods of time and would like to save some resources, increase this value - it will make Iroha check for new transactions more rarely. NB: the first transaction after idle period might be a little delayed due to that. Second and further blocks will be processed quicker. + - ``stale_stream_max_rounds`` is an optional parameter specifying the maximum amount of rounds to keep an open status stream while no status update is reported. @@ -141,6 +128,8 @@ Environment-specific parameters track a transaction if for some reason it is not updated with new rounds. However large values increase the average number of connected clients during each round. + It is recommended to limit this parameter to make sure the node is not overloaded with streams. + - ``initial_peers`` is an optional parameter specifying list of peers a node will use after startup instead of peers from genesis block. It could be useful when you add a new node to the network where the most of @@ -156,6 +145,26 @@ Environment-specific parameters } ] +Good Practice Example +--------------------- + +With even distribution we received quite good results - with 300k transactions sent in 5 minutes. 
+Commit took from 2 seconds to 2 minutes. +**Please note that results always depend on number of peers in your network, its speed and parameters of the hosts on which the peers run.** + +Here is the configuration we used: + +.. code-block:: javascript + + "max_proposal_size" : 10000, + "proposal_delay" : 1000, + "vote_delay" : 1000, + "mst_enable" : true, + "mst_expiration_time": 1440, + "max_rounds_delay": 500, + "stale_stream_max_rounds": 100000 + + Environment variables ===================== diff --git a/docs/source/deploy/flags.rst b/docs/source/deploy/flags.rst new file mode 100644 index 00000000000..515ff8938ed --- /dev/null +++ b/docs/source/deploy/flags.rst @@ -0,0 +1,32 @@ +irohad Flags +================ + +You can start ``irohad`` with different flags. +Some of the main ones `were already mentioned `_ but there are others, that you might find useful for your unique situation. +Here they are: + ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| Flag | Description | Type | Default | ++=========================+=====================================================================+=================+================+ +| ``-config`` | specifies Iroha provisioning path | ``string`` | "" | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-drop_state`` | drops existing state data at startup | ``bool`` | false | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-genesis_block`` | specifies file with initial block | ``string`` | "" | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-keypair_name`` | specifies name of .pub and .priv files | ``string`` | "" | 
++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-metrics_addr`` | Prometeus HTTP server listen address | ``string`` | "127.0.0.1" | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-metrics_port`` | Prometeus HTTP server listens port, disabled by default | ``string`` | "" | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-overwrite_ledger`` | overwrites ledger data if existing | ``bool`` | false | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-reuse_state`` | tries to reuse existing state data at startup (Deprecated, startup | ``bool`` | true | +| | reuses state by default. Use ``drop_state`` to drop the WSV) | | | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-verbosity`` | log verbosity | ``string`` | "config_file" | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ +| ``-wait_for_new_blocks``| startup synchronization policy - waits for new blocks in blockstore,| ``bool`` | false | +| | does not run network | | | ++-------------------------+---------------------------------------------------------------------+-----------------+----------------+ diff --git a/docs/source/deploy/index.rst b/docs/source/deploy/index.rst index f60f08f6895..313b875a771 100644 --- a/docs/source/deploy/index.rst +++ b/docs/source/deploy/index.rst @@ -22,6 +22,7 @@ So, to be on the safe side, please try to deploy on newer versions of Linux (see :maxdepth: 1 single.rst + flags.rst multiple.rst k8s-deployment.rst troubles.rst diff --git 
a/docs/source/deploy/single.rst b/docs/source/deploy/single.rst index 912aeb43e6f..0cf6b59638f 100644 --- a/docs/source/deploy/single.rst +++ b/docs/source/deploy/single.rst @@ -19,6 +19,22 @@ Run postgres server In order to run postgres server locally, you should check postgres `website `__ and follow their description. Generally, postgres server runs automatically when the system starts, but this should be checked in the configuration of the system. +Postgres database server could be initialized and started manually without usual system integration: + +.. code-block::shell + + initdb ~/iroha/nodeX_db/ + ## Start server in background, logs will appear in current console + postgres -D ~/iroha/nodaX_db/ -p5433 & + createuser -s iroha_user -p5433 + +Selected port ``5433`` (default is 5432) and database user ``iroha_user`` are used by irohad to connect to database. +See `Configure`_. Maintenance database ``postgres`` is created by default, but if for some reason another name required, create it: + +.. code-block::shell + + createdb iroha_mainteance -p5433 + Run iroha daemon (irohad) """"""""""""""""""""""""" @@ -137,4 +153,3 @@ If they are met, you can move forward with the following command: # Docker network name --network=iroha-network \ hyperledger/iroha:latest - diff --git a/docs/source/develop/api/commands.rst b/docs/source/develop/api/commands.rst index 295996099c9..8fe9ecfc570 100644 --- a/docs/source/develop/api/commands.rst +++ b/docs/source/develop/api/commands.rst @@ -64,6 +64,7 @@ Purpose The purpose of add peer command is to write into ledger the fact of peer addition into the peer network. After a transaction with AddPeer has been committed, consensus and synchronization components will start using it. +You can also `learn more about Add Peer command <../../maintenance/add_peer.html>`_. 
Schema ^^^^^^ diff --git a/docs/source/develop/api/queries.rst b/docs/source/develop/api/queries.rst index bc2315c56f6..35124709bb3 100644 --- a/docs/source/develop/api/queries.rst +++ b/docs/source/develop/api/queries.rst @@ -30,7 +30,7 @@ Here is how the "expression" is specified: kPosition = 1; } -There are 2 bases for ordering – on creation time and depending on the position in the block. +There are 2 bases for ordering – on creation time and depending on the number of block. There is an ascending and descending directions for each Field: @@ -55,15 +55,11 @@ Now, the ordering itself: After ordering is specified, pagination can be executed: -.. code-block:: proto +.. literalinclude:: ../../../../shared_model/schema/queries.proto + :language: proto + :start-at: message TxPaginationMeta { + :end-before: message AssetPaginationMeta { - message TxPaginationMeta { - uint32 page_size = 1; - oneof opt_first_tx_hash { - string first_tx_hash = 2; - } - Ordering ordering = 3; - } What is added to the request structure in case of pagination ------------------------------------------------------------ @@ -76,7 +72,11 @@ What is added to the request structure in case of pagination "First tx hash", "hash of the first transaction in the page. If that field is not set — then the first transactions are returned", "hash in hex format", "bddd58404d1315e0eb27902c5d7c8eb0602c16238f005773df406bc191308929" "ordering", "how the results should be ordered (before pagination is applied)", "see fields below", "see fields below" "ordering.sequence", "ordeing spec, like in SQL ORDER BY", "sequence of fields and directions", "[{kCreatedTime, kAscending}, {kPosition, kDescending}]" - + "First tx time", "time of the first transaction in query result. 
If that field is not set - then the transactions starting from first are returned", "Google Protocol Buffer Timestamp type", "0001-01-01T00:00:00Z <= first tx time <= 9999-12-31T23:59:59.999999999Z" + "Last tx time", "time of the last transaction in query result. If that field is not set - then the transactions up to the last are returned", "Google Protocol Buffer Timestamp type", "0001-01-01T00:00:00Z <= last tx time <= 9999-12-31T23:59:59.999999999Z" + "First tx height", "block height of the first transaction in query result. If that field is not set - then the transactions starting from height 1 are returned", "first tx height > 0", "4" + "Last tx height", "block height of the last transaction in query result. If that field is not set - then the transactions up to the last one are returned", "last tx height > 0", "6" + Engine Receipts ^^^^^^^^^^^^^^^ diff --git a/docs/source/getting_started/python-guide.rst b/docs/source/getting_started/python-guide.rst index 5a9501b0697..70d66044f23 100644 --- a/docs/source/getting_started/python-guide.rst +++ b/docs/source/getting_started/python-guide.rst @@ -1,6 +1,15 @@ Sending Transactions With Python library ======================================== +Open a new terminal (note that Iroha container and ``irohad`` should be up and +running) and attach to an ``iroha`` docker container: + +.. code-block:: shell + + docker exec -it iroha /bin/bash + +Now you are in the interactive shell of Iroha's container. + Prerequisites ------------- @@ -61,81 +70,11 @@ Running example transactions If you only want to try what Iroha transactions would look like, you can simply go to the examples from the repository `here `_. -Let's check out the `tx-example.py` file. - -Here are Iroha dependencies. -Python library generally consists of 3 parts: Iroha, IrohaCrypto and IrohaGrpc which we need to import: - -.. code-block:: python - - from iroha import Iroha, IrohaGrpc - from iroha import IrohaCrypto - -The line - -.. 
code-block:: python - - from iroha.primitive_pb2 import can_set_my_account_detail - - -is actually about the permissions you might be using for the transaction. -You can find a full list here: `Permissions <../develop/api/permissions.html>`_. - - -In the next block we can see the following: - -.. code-block:: python - - admin_private_key = 'f101537e319568c765b2cc89698325604991dca57b9716b58016b253506cab70' - user_private_key = IrohaCrypto.private_key() - user_public_key = IrohaCrypto.derive_public_key(user_private_key) - iroha = Iroha('admin@test') - net = IrohaGrpc() - -Here you can see the example account information. -It will be used later with the commands. -If you change the commands in the transaction, -the set of data in this part might also change depending on what you need. - -Defining the commands ---------------------- - -Let's look at the first of the defined commands: - -.. code-block:: python - - def create_domain_and_asset(): - commands = [ - iroha.command('CreateDomain', domain_id='domain', default_role='user'), - iroha.command('CreateAsset', asset_name='coin', - domain_id='domain', precision=2) - ] - tx = IrohaCrypto.sign_transaction( - iroha.transaction(commands), admin_private_key) - send_transaction_and_print_status(tx) - -Here we define a transaction made of 2 commands: CreateDomain and CreateAsset. -You can find a full list here: `commands <../develop/api/commands.html>`_. -Each of Iroha commands has its own set of parameters. -You can check them in command descriptions in `iroha-api-reference <../develop/api.html>`_. - -Then we sign the transaction with the parameters defined earlier. - -You can define `queries <../develop/api/queries.html>`_ the same way. - -Running the commands --------------------- - -Last lines - -.. code-block:: python +Here is the `tx-example.py` file with comments to clarify each step: - create_domain_and_asset() - add_coin_to_admin() - create_account_userone() - ... +.. 
remoteliteralinclude:: https://raw.githubusercontent.com/hyperledger/iroha-python/master/examples/tx-example.py + :language: python -run the commands defined previously. Now, if you have `irohad` running, you can run the example or your own file by simply opening the .py file in another tab. diff --git a/docs/source/maintenance/add_peer.rst b/docs/source/maintenance/add_peer.rst new file mode 100644 index 00000000000..55de2238c21 --- /dev/null +++ b/docs/source/maintenance/add_peer.rst @@ -0,0 +1,63 @@ +============ +Adding Peers +============ + +In HL Iroha, you can add new peers to the network while it is running. +This is done by using a special command, `AddPeer <../develop/api/commands.html#add-peer>`_. + +Requirements +============ + +**There should be a peer that:** + +— runs with a Genesis Block (initial block of the blockchain) identical to the one on the peers already in the network; + +— has a resolvable address; + +— has a peer keypair (Ed25519 with SHA-2/SHA-3) + +.. important:: The account that is sending the transaction adding a peer must have the `Can Add Peer permission <../develop/api/permissions.html#can-add-peer>`_ and to remove a peer —`Can Remove Peer permission <../develop/api/permissions.html#can-remove-peer>`_ in their role - this must be set in the genesis block. + +Usage +===== + +As described in `the API reference <../develop/api/commands.html#add-peer>`_ to use the command, you will only need: + +— a public key of the peer that you want to add to the network; + +— resolvable IP address of the peer + +Steps: + +1. Create a network with `Can Add Peer <../develop/api/permissions.html#can-add-peer>`_ and `Can Remove Peer <../develop/api/permissions.html#can-remove-peer>`_ permissions set up in the genesis block assigned to a user; +2. Create another peer running HL Iroha with the same genesis block and similar configuration; +3. 
Send a transaction from the account with the necessary permissions that has ``add peer`` command in it (see an example below) +4. Check the logs of the peers to see if everything is working correctly. + +You can also make sure that everything is ok by sending a transaction and checking if the number of blocks is the same on the nodes. + +.. note:: If there is only one existing peer running, you will need to configure the peers that you are adding so that they would have all of the peers (both already existing and the new ones) in the "initial_peers" parameter in the `configuration <../configure/index.html#environment-specific-parameters>`_. Another case when this is needed is when the network has been running for some time and the peers indicated in the genesis block are no longer there (because they were removed using Remove Peer command while new peers were added). + +Example +======= + +Here is what a command might look like in Python. +In this example we used `Root permission <../develop/api/permissions.html#root>`_ that has all permissions, including `Can Add Peer permission <../develop/api/permissions.html#can-add-peer>`_ and `Can Remove Peer permission <../develop/api/permissions.html#can-remove-peer>`_: + +.. code-block:: python + + def add_peer(): + peer1 = primitive_pb2.Peer() + peer1.address = '192.168.1.1:50541' + peer1.peer_key = '716fe505f69f18511a1b083915aa9ff73ef36e6688199f3959750db38b8f4bfc' + tx = iroha.transaction([ + iroha.command('AddPeer', peer=peer1) + ], creator_account=ADMIN_ACCOUNT_ID, quorum=1) + + IrohaCrypto.sign_transaction(tx, ADMIN_PRIVATE_KEY) + add_peer() + +Remove Peer +=========== + +To remove the peer, you will need to use `Remove Peer <../develop/api/commands.html#remove-peer>`_ command from the account that has ``CanRemovePeer permission``. 
diff --git a/docs/source/maintenance/index.rst b/docs/source/maintenance/index.rst index 47cc25991d4..7daefd73b5b 100644 --- a/docs/source/maintenance/index.rst +++ b/docs/source/maintenance/index.rst @@ -8,6 +8,7 @@ Hardware requirements, deployment process in details, aspects related to securit :maxdepth: 3 :caption: Table of contents + add_peer.rst restarting_node.rst sec-install.rst shepherd.rst diff --git a/docs/source/maintenance/restarting_node.rst b/docs/source/maintenance/restarting_node.rst index 61eca810e0b..7adc7fd07e5 100644 --- a/docs/source/maintenance/restarting_node.rst +++ b/docs/source/maintenance/restarting_node.rst @@ -38,18 +38,11 @@ Although it can be a great idea for some of the cases, but please consider that Please, restore it from blocks. -Enabling WSV Reuse +Dropping WSV ^^^^^^^^^^^^^^^^^^ -If you want to reuse WSV state, start Iroha with `--reuse_state` flag. -Given this flag, Iroha will not reset or overwrite the state database if it fails to start for whatever reason. - - -Enabling WSV Reuse -^^^^^^^^^^^^^^^^^^ - -If you want to reuse WSV state, start Iroha with `--reuse_state` flag. -Given this flag, Iroha will not reset or overwrite the state database if it fails to start for whatever reason. +By default Iroha reuses WSV state on startup, so there is no need for the `--reuse_state` flag anymore. However, it is left for backward compatibility. +If you want to drop WSV state, start Iroha with '--drop_state' flag. Given this flag, Iroha will reset and overwrite the state database. State Database Schema version ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -79,3 +72,8 @@ Then it will find all migration paths that will transition your database to the .. seealso:: `Here `_ are some details about different migration cases and examples you can check out to perform migration + +Synchronize WSV mode. +^^^^^^^^^^^^^^^^^^^^^ + +Specify the '--wait_for_new_blocks' option for WSV synchronization mode. 
Iroha restores WSV from blockstore and waits for new blocks to be added externally. In this mode Iroha will not perform network operations. diff --git a/docs/source/maintenance/shepherd.rst b/docs/source/maintenance/shepherd.rst index b5f361c559f..a7a1d5fc9ec 100644 --- a/docs/source/maintenance/shepherd.rst +++ b/docs/source/maintenance/shepherd.rst @@ -7,8 +7,8 @@ Shepherd is a command line utility that helps to perform maintenance tasks with Prerequisites ============= -To access irohad daemon, utility service has to be configured in it. -See `the configuration details <#configuring-irohad>`_. +To access irohad daemon, ``utility service`` has to be configured in it. +See `the configuration details <../configure/index.html#deployment-specific-parameters>`_. Next, when invoking ``shepherd``, pass the ``--irohad`` command line argument with address and port of irohad utility service: diff --git a/docs/source/requirements.txt b/docs/source/requirements.txt index e835f896264..5c73d8a5d53 100644 --- a/docs/source/requirements.txt +++ b/docs/source/requirements.txt @@ -23,7 +23,7 @@ restructuredtext-lint==1.1.2 singledispatch==3.4.0.3 six==1.11.0 snowballstemmer==1.2.1 -Sphinx==1.6.6 +Sphinx sphinx-autobuild==0.7.1 sphinx-intl==0.9.11 sphinx-rtd-theme==0.4.2 @@ -33,5 +33,6 @@ typing==3.6.2 urllib3==1.24.2 watchdog==0.8.3 yarg==0.1.9 -m2r +m2r2 pygments-lexer-solidity +sphinxext-remoteliteralinclude diff --git a/example/burrow_integration/integration_example.py b/example/burrow_integration/integration_example.py new file mode 100644 index 00000000000..2ece0e40fe1 --- /dev/null +++ b/example/burrow_integration/integration_example.py @@ -0,0 +1,217 @@ +import os +import binascii +from iroha import IrohaCrypto +from iroha import Iroha, IrohaGrpc +from iroha.primitive_pb2 import can_set_my_account_detail +import sys +from Crypto.Hash import keccak + +if sys.version_info[0] < 3: + raise Exception("Python 3 or a more recent version is required.") + +# Here is the information 
about the environment and admin account information: +IROHA_HOST_ADDR = os.getenv("IROHA_HOST_ADDR", "127.0.0.1") +IROHA_PORT = os.getenv("IROHA_PORT", "50051") +ADMIN_ACCOUNT_ID = os.getenv("ADMIN_ACCOUNT_ID", "admin@test") +ADMIN_PRIVATE_KEY = os.getenv( + "ADMIN_PRIVATE_KEY", + "f101537e319568c765b2cc89698325604991dca57b9716b58016b253506cab70", +) + +iroha = Iroha(ADMIN_ACCOUNT_ID) +net = IrohaGrpc("{}:{}".format(IROHA_HOST_ADDR, IROHA_PORT)) + +test_private_key = IrohaCrypto.private_key() +test_public_key = IrohaCrypto.derive_public_key(test_private_key).decode("utf-8") + + +def trace(func): + """ + A decorator for tracing methods' begin/end execution points + """ + + def tracer(*args, **kwargs): + name = func.__name__ + print('\tEntering "{}"'.format(name)) + result = func(*args, **kwargs) + print('\tLeaving "{}"'.format(name)) + return result + + return tracer + + +def make_number_hex_left_padded(number: str, width: int = 64): + number_hex = "{:x}".format(number) + return str(number_hex).zfill(width) + + +def left_padded_address_of_param( + param_index: int, number_of_params: int, width: int = 64 +): + """Specifies the position of each argument according to Contract AbI specifications.""" + bits_offset = 32 * number_of_params + bits_per_param = 64 + bits_for_the_param = bits_offset + bits_per_param * param_index + return make_number_hex_left_padded(bits_for_the_param, width) + + +def argument_encoding(arg): + """Encodes the argument according to Contract ABI specifications.""" + encoded_argument = str(hex(len(arg)))[2:].zfill(64) + encoded_argument = ( + encoded_argument + arg.encode("utf8").hex().ljust(64, "0").upper() + ) + return encoded_argument + + +def get_first_four_bytes_of_keccak(function_signature: str): + """Generates the first 4 bytes of the keccak256 hash of the function signature. 
""" + k = keccak.new(digest_bits=256) + k.update(function_signature) + return k.hexdigest()[:8] + + +@trace +def create_contract(): + bytecode = "608060405234801561001057600080fd5b5073a6abc17819738299b3b2c1ce46d55c74f04e290c6000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550610f90806100746000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c80632c74aaaf1461005c5780632cddc4111461008c57806337410dfa146100bc578063bc53c0c4146100ec578063d4e804ab1461011c575b600080fd5b61007660048036038101906100719190610893565b61013a565b6040516100839190610bcb565b60405180910390f35b6100a660048036038101906100a19190610996565b6102a9565b6040516100b39190610bcb565b60405180910390f35b6100d660048036038101906100d19190610893565b610481565b6040516100e39190610bcb565b60405180910390f35b610106600480360381019061010191906108ff565b61063d565b6040516101139190610bcb565b60405180910390f35b610124610807565b6040516101319190610bb0565b60405180910390f35b606060008383604051602401610151929190610c0f565b6040516020818303038152906040527f260b5d52000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008060008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16836040516102189190610b82565b600060405180830381855af49150503d8060008114610253576040519150601f19603f3d011682016040523d82523d6000602084013e610258565b606091505b50915091508161029d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161029490610d13565b60405180910390fd5b80935050505092915050565b60606000858585856040516024016102c49493929190610c92565b6040516020818303038152906040527f2cddc411000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffff
ffffffffffffffffffffffff8381831617835250505050905060008060008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168360405161038b9190610b82565b600060405180830381855af49150503d80600081146103c6576040519150601f19603f3d011682016040523d82523d6000602084013e6103cb565b606091505b509150915081610410576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161040790610cf3565b60405180910390fd5b8660405161041e9190610b99565b6040518091039020886040516104349190610b99565b60405180910390207f6a739057159b3f3e2efcba00d44b0fa47de56972ed8776a2da7682bcf7c67de18760405161046b9190610bed565b60405180910390a3809350505050949350505050565b606060008383604051602401610498929190610c0f565b6040516020818303038152906040527f37410dfa000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008060008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168360405161055f9190610b82565b600060405180830381855af49150503d806000811461059a576040519150601f19603f3d011682016040523d82523d6000602084013e61059f565b606091505b5091509150816105e4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016105db90610cf3565b60405180910390fd5b856040516105f29190610b99565b60405180910390207fd8ea495c3185a632d25d8ccc5c355aeb4058bfaaaee8647c075dc5c1ce62914c866040516106299190610bed565b60405180910390a280935050505092915050565b6060600084848460405160240161065693929190610c46565b6040516020818303038152906040527fbc53c0c4000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008060008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16836040
5161071d9190610b82565b600060405180830381855af49150503d8060008114610758576040519150601f19603f3d011682016040523d82523d6000602084013e61075d565b606091505b5091509150816107a2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161079990610cf3565b60405180910390fd5b856040516107b09190610b99565b6040518091039020876040516107c69190610b99565b60405180910390207fb4086b7a9e5eac405225b6c630a4147f0a8dcb4af3583733b10db7b91ad21ffd60405160405180910390a38093505050509392505050565b60008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600061083e61083984610d58565b610d33565b90508281526020810184848401111561085657600080fd5b610861848285610e09565b509392505050565b600082601f83011261087a57600080fd5b813561088a84826020860161082b565b91505092915050565b600080604083850312156108a657600080fd5b600083013567ffffffffffffffff8111156108c057600080fd5b6108cc85828601610869565b925050602083013567ffffffffffffffff8111156108e957600080fd5b6108f585828601610869565b9150509250929050565b60008060006060848603121561091457600080fd5b600084013567ffffffffffffffff81111561092e57600080fd5b61093a86828701610869565b935050602084013567ffffffffffffffff81111561095757600080fd5b61096386828701610869565b925050604084013567ffffffffffffffff81111561098057600080fd5b61098c86828701610869565b9150509250925092565b600080600080608085870312156109ac57600080fd5b600085013567ffffffffffffffff8111156109c657600080fd5b6109d287828801610869565b945050602085013567ffffffffffffffff8111156109ef57600080fd5b6109fb87828801610869565b935050604085013567ffffffffffffffff811115610a1857600080fd5b610a2487828801610869565b925050606085013567ffffffffffffffff811115610a4157600080fd5b610a4d87828801610869565b91505092959194509250565b610a6281610dd7565b82525050565b6000610a7382610d89565b610a7d8185610d9f565b9350610a8d818560208601610e18565b610a9681610eab565b840191505092915050565b6000610aac82610d89565b610ab68185610db0565b9350610ac6818560208601610e18565b80840191505092915050565b6000610add82610d94565b610ae78185610dbb565b9350610af7818560208601610e18565b610b00
81610eab565b840191505092915050565b6000610b1682610d94565b610b208185610dcc565b9350610b30818560208601610e18565b80840191505092915050565b6000610b49602783610dbb565b9150610b5482610ebc565b604082019050919050565b6000610b6c602883610dbb565b9150610b7782610f0b565b604082019050919050565b6000610b8e8284610aa1565b915081905092915050565b6000610ba58284610b0b565b915081905092915050565b6000602082019050610bc56000830184610a59565b92915050565b60006020820190508181036000830152610be58184610a68565b905092915050565b60006020820190508181036000830152610c078184610ad2565b905092915050565b60006040820190508181036000830152610c298185610ad2565b90508181036020830152610c3d8184610ad2565b90509392505050565b60006060820190508181036000830152610c608186610ad2565b90508181036020830152610c748185610ad2565b90508181036040830152610c888184610ad2565b9050949350505050565b60006080820190508181036000830152610cac8187610ad2565b90508181036020830152610cc08186610ad2565b90508181036040830152610cd48185610ad2565b90508181036060830152610ce88184610ad2565b905095945050505050565b60006020820190508181036000830152610d0c81610b3c565b9050919050565b60006020820190508181036000830152610d2c81610b5f565b9050919050565b6000610d3d610d4e565b9050610d498282610e4b565b919050565b6000604051905090565b600067ffffffffffffffff821115610d7357610d72610e7c565b5b610d7c82610eab565b9050602081019050919050565b600081519050919050565b600081519050919050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b6000610de282610de9565b9050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b82818337600083830152505050565b60005b83811015610e36578082015181840152602081019050610e1b565b83811115610e45576000848401525b50505050565b610e5482610eab565b810181811067ffffffffffffffff82111715610e7357610e72610e7c565b5b80604052505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000601f19601f8301169050919050565b7f4572726f722063616c6c696e67207365727669636520636f6e747261637420666000820152
7f756e6374696f6e00000000000000000000000000000000000000000000000000602082015250565b7f4572726f722063616c6c696e67207365727669636520636f6e7472616374206660008201527f756e6374696f6e2000000000000000000000000000000000000000000000000060208201525056fea264697066735822122097d0915acf0fba6aaeec5068f9bf82acdbdce6d684729b0af81f55ee2929be6664736f6c63430008040033" + """Bytecode was generated using remix editor https://remix.ethereum.org/ from file iroha.sol. """ + tx = iroha.transaction( + [iroha.command("CallEngine", caller=ADMIN_ACCOUNT_ID, input=bytecode)] + ) + IrohaCrypto.sign_transaction(tx, ADMIN_PRIVATE_KEY) + net.send_tx(tx) + hex_hash = binascii.hexlify(IrohaCrypto.hash(tx)) + for status in net.tx_status_stream(tx): + print(status) + return hex_hash + + +@trace +def get_engine_receipts_address(tx_hash: str): + query = iroha.query("GetEngineReceipts", tx_hash=tx_hash) + IrohaCrypto.sign_query(query, ADMIN_PRIVATE_KEY) + response = net.send_query(query) + contract_add = response.engine_receipts_response.engine_receipts[0].contract_address + return contract_add + + +@trace +def get_engine_receipts_result(tx_hash: str): + query = iroha.query("GetEngineReceipts", tx_hash=tx_hash) + IrohaCrypto.sign_query(query, ADMIN_PRIVATE_KEY) + response = net.send_query(query) + result = response.engine_receipts_response.engine_receipts[ + 0 + ].call_result.result_data + bytes_object = bytes.fromhex(result) + ascii_string = bytes_object.decode("ASCII") + print(ascii_string) + + +@trace +def add_asset(address): + params = get_first_four_bytes_of_keccak(b"addAsset(string,string)") + no_of_param = 2 + for x in range(no_of_param): + params = params + left_padded_address_of_param(x, no_of_param) + params = params + argument_encoding("coin#domain") # asset id + params = params + argument_encoding("500") # amount of asset + tx = iroha.transaction( + [ + iroha.command( + "CallEngine", caller=ADMIN_ACCOUNT_ID, callee=address, input=params + ) + ] + ) + IrohaCrypto.sign_transaction(tx, 
ADMIN_PRIVATE_KEY) + response = net.send_tx(tx) + for status in net.tx_status_stream(tx): + print(status) + + +@trace +def create_account(address): + params = get_first_four_bytes_of_keccak(b"createAccount(string,string,string)") + no_of_param = 3 + for x in range(no_of_param): + params = params + left_padded_address_of_param(x, no_of_param) + params = params + argument_encoding("test4") # source account id + params = params + argument_encoding("test") # domain id + params = params + argument_encoding(test_public_key) # key + tx = iroha.transaction( + [ + iroha.command( + "CallEngine", caller=ADMIN_ACCOUNT_ID, callee=address, input=params + ) + ] + ) + IrohaCrypto.sign_transaction(tx, ADMIN_PRIVATE_KEY) + response = net.send_tx(tx) + for status in net.tx_status_stream(tx): + print(status) + hex_hash = binascii.hexlify(IrohaCrypto.hash(tx)) + return hex_hash + + +@trace +def transfer(address): + params = get_first_four_bytes_of_keccak( + b"transferAsset(string,string,string,string)" + ) + no_of_param = 4 + for x in range(no_of_param): + params = params + left_padded_address_of_param(x, no_of_param) + params = params + argument_encoding(ADMIN_ACCOUNT_ID) # source account + params = params + argument_encoding("userone@domain") # destination account + params = params + argument_encoding("coin#domain") # asset id + params = params + argument_encoding("100") # amount of asset + tx = iroha.transaction( + [ + iroha.command( + "CallEngine", caller=ADMIN_ACCOUNT_ID, callee=address, input=params + ) + ] + ) + IrohaCrypto.sign_transaction(tx, ADMIN_PRIVATE_KEY) + response = net.send_tx(tx) + for status in net.tx_status_stream(tx): + print(status) + + +@trace +def balance(address): + params = get_first_four_bytes_of_keccak(b"queryBalance(string,string)") + no_of_param = 2 + for x in range(no_of_param): + params = params + left_padded_address_of_param(x, no_of_param) + params = params + argument_encoding(ADMIN_ACCOUNT_ID) # account id + params = params + 
argument_encoding("coin#domain") # asset id + tx = iroha.transaction( + [ + iroha.command( + "CallEngine", caller=ADMIN_ACCOUNT_ID, callee=address, input=params + ) + ] + ) + IrohaCrypto.sign_transaction(tx, ADMIN_PRIVATE_KEY) + response = net.send_tx(tx) + for status in net.tx_status_stream(tx): + print(status) + hex_hash = binascii.hexlify(IrohaCrypto.hash(tx)) + return hex_hash + + +hash = create_contract() +address = get_engine_receipts_address(hash) +hash = balance(address) +get_engine_receipts_result(hash) +add_asset(address) +hash = balance(address) +get_engine_receipts_result(hash) +transfer(address) +hash = balance(address) +get_engine_receipts_result(hash) +hash = create_account(address) +get_engine_receipts_result(hash) + +print("done") diff --git a/example/burrow_integration/iroha.sol b/example/burrow_integration/iroha.sol new file mode 100644 index 00000000000..8440aaa1834 --- /dev/null +++ b/example/burrow_integration/iroha.sol @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +contract Iroha { + address public serviceContractAddress; + + event Created(string indexed name, string indexed domain); + event Transferred(string indexed source, string indexed destination, string amount); + event Added(string indexed asset, string amount); + + + // Initializing service contract address in constructor + constructor(){ + serviceContractAddress = 0xA6Abc17819738299B3B2c1CE46d55c74f04E290C; + } + + // Creates an iroha ccount + function createAccount(string memory name, string memory domain, string memory key) public returns (bytes memory result) { + bytes memory payload = abi.encodeWithSignature( + "createAccount(string,string,string)", + name, + domain, + key); + (bool success, bytes memory ret) = address(serviceContractAddress).delegatecall(payload); + require(success, "Error calling service contract function"); + emit Created(name, domain); + result = ret; + } + + //Transfers asset from one iroha account to another + 
function transferAsset(string memory src, string memory dst, string memory asset, string memory amount) public returns (bytes memory result) { + bytes memory payload = abi.encodeWithSignature( + "transferAsset(string,string,string,string)", + src, + dst, + asset, + amount); + (bool success, bytes memory ret) = address(serviceContractAddress).delegatecall(payload); + require(success, "Error calling service contract function"); + + emit Transferred(src, dst, amount); + result = ret; + } + // Adds asset to iroha account + function addAsset(string memory asset, string memory amount) public returns (bytes memory result) { + bytes memory payload = abi.encodeWithSignature( + "addAsset(string,string)", + asset, + amount); + (bool success, bytes memory ret) = address(serviceContractAddress).delegatecall(payload); + require(success, "Error calling service contract function"); + + emit Added(asset, amount); + result = ret; + } + //Queries balance of an iroha account + function queryBalance(string memory _account, string memory _asset) public returns (bytes memory result) { + bytes memory payload = abi.encodeWithSignature( + "getAssetBalance(string,string)", + _account, + _asset); + (bool success, bytes memory ret) = address(serviceContractAddress).delegatecall(payload); + require(success,"Error calling service contract function "); + result = ret; + } +} diff --git a/example/config.postgres.sample b/example/config.postgres.sample new file mode 100644 index 00000000000..ea47dff255b --- /dev/null +++ b/example/config.postgres.sample @@ -0,0 +1,22 @@ +{ + "block_store_path" : "/tmp/block_store/", + "torii_port" : 50051, + "internal_port" : 10001, + "database": { + "type": "postgres", + "host": "localhost", + "port": 5432, + "user": "postgres", + "password": "mysecretpassword", + "working database": "iroha_data", + "maintenance database": "postgres" + }, + "max_proposal_size" : 10, + "proposal_delay" : 5000, + "vote_delay" : 5000, + "mst_enable" : false, + "mst_expiration_time" : 
1440, + "max_rounds_delay": 3000, + "stale_stream_max_rounds": 2, + "metrics": "127.0.0.1:8080" +} diff --git a/example/config.sample b/example/config.sample index bced7045d13..c758b10bc03 100644 --- a/example/config.sample +++ b/example/config.sample @@ -1,14 +1,16 @@ { - "block_store_path" : "/tmp/block_store/", "torii_port" : 50051, "internal_port" : 10001, - "pg_opt" : "host=localhost port=5432 user=postgres password=mysecretpassword", + "database": { + "type": "rocksdb", + "path": "/path/to/wsv/folder" + }, "max_proposal_size" : 10, "proposal_delay" : 5000, "vote_delay" : 5000, "mst_enable" : false, "mst_expiration_time" : 1440, "max_rounds_delay": 3000, - "stale_stream_max_rounds": 2 + "stale_stream_max_rounds": 2, + "metrics": "127.0.0.1:8080" } - diff --git a/goSrc/src/vmCaller/evm/native_contract.go b/goSrc/src/vmCaller/evm/native_contract.go index 6f468a2c6d1..296cb42977f 100644 --- a/goSrc/src/vmCaller/evm/native_contract.go +++ b/goSrc/src/vmCaller/evm/native_contract.go @@ -36,6 +36,27 @@ var ( PermFlag: permission.Call, F: transferAsset, }, + native.Function{ + Comment: ` + * @notice Creates a new iroha account + * @param Name account name + * @param Domain domain of account + * @param Key public key of account + * @return 'true' if successful, 'false' otherwise + `, + PermFlag: permission.Call, + F: createAccount, + }, + native.Function{ + Comment: ` + * @notice Adds asset to iroha account + * @param Asset name of asset + * @param Amount mount of asset to be added + * @return 'true' if successful, 'false' otherwise + `, + PermFlag: permission.Call, + F: addAsset, + }, ) ) @@ -98,6 +119,54 @@ func transferAsset(ctx native.Context, args transferAssetArgs) (transferAssetRet return transferAssetRets{Result: true}, nil } +type createAccountArgs struct { + Name string + Domain string + Key string +} + +type createAccountRets struct { + Result bool +} + +func createAccount(ctx native.Context, args createAccountArgs) (createAccountRets, error) { + + err := 
iroha.CreateAccount(args.Name, args.Domain, args.Key) + if err != nil { + return createAccountRets{Result: false}, err + } + + ctx.Logger.Trace.Log("function", "CreateAccount", + "name", args.Name, + "domain", args.Domain, + "key", args.Key) + + return createAccountRets{Result: true}, nil +} + +type addAssetArgs struct { + Asset string + Amount string +} + +type addAssetRets struct { + Result bool +} + +func addAsset(ctx native.Context, args addAssetArgs) (addAssetRets, error) { + + err := iroha.AddAssetQuantity(args.Asset, args.Amount) + if err != nil { + return addAssetRets{Result: false}, err + } + + ctx.Logger.Trace.Log("function", "addAsset", + "asset", args.Asset, + "amount", args.Amount) + + return addAssetRets{Result: true}, nil +} + func MustCreateNatives() *native.Natives { ns, err := createNatives() if err != nil { diff --git a/goSrc/src/vmCaller/iroha/commands.go b/goSrc/src/vmCaller/iroha/commands.go index 61d52004087..8b44f3b05f0 100644 --- a/goSrc/src/vmCaller/iroha/commands.go +++ b/goSrc/src/vmCaller/iroha/commands.go @@ -52,6 +52,51 @@ func TransferAsset(src, dst, asset, amount string) error { return nil } +func CreateAccount(name string, domain string, key string) error { + command := &pb.Command{Command: &pb.Command_CreateAccount{ + CreateAccount: &pb.CreateAccount{ + AccountName: name, + DomainId: domain, + PublicKey: key, + }}} + commandResult, err := makeProtobufCmdAndExecute(IrohaCommandExecutor, command) + if err != nil { + return err + } + if commandResult.error_code != 0 { + error_extra := "" + error_extra_ptr := commandResult.error_extra.toStringAndRelease() + if error_extra_ptr != nil { + error_extra = ": " + *error_extra_ptr + } + return fmt.Errorf("Error executing CreateAccount command: %s", error_extra) + } + + return nil +} + +func AddAssetQuantity(asset string, amount string) error { + command := &pb.Command{Command: &pb.Command_AddAssetQuantity{ + AddAssetQuantity: &pb.AddAssetQuantity{ + AssetId: asset, + Amount: amount, + }}} + 
commandResult, err := makeProtobufCmdAndExecute(IrohaCommandExecutor, command) + if err != nil { + return err + } + if commandResult.error_code != 0 { + error_extra := "" + error_extra_ptr := commandResult.error_extra.toStringAndRelease() + if error_extra_ptr != nil { + error_extra = ": " + *error_extra_ptr + } + return fmt.Errorf("Error executing AddAssetQuantity command: %s", error_extra) + } + + return nil +} + // -----------------------Iroha queries--------------------------------------- // Queries asset balance of an account diff --git a/iroha-cli/client.cpp b/iroha-cli/client.cpp index 070af397e54..1ee499e514a 100644 --- a/iroha-cli/client.cpp +++ b/iroha-cli/client.cpp @@ -17,8 +17,7 @@ template std::unique_ptr makeStub(std::string target_ip, int port) { using namespace iroha::network; - static const auto kChannelParams = getDefaultChannelParams(); - return createInsecureClient(target_ip, port, *kChannelParams); + return createInsecureClient(target_ip, port, std::nullopt); } namespace iroha_cli { diff --git a/irohad/CMakeLists.txt b/irohad/CMakeLists.txt index 7d64c595b6a..72a3d741839 100644 --- a/irohad/CMakeLists.txt +++ b/irohad/CMakeLists.txt @@ -16,3 +16,4 @@ add_subdirectory(synchronizer) add_subdirectory(multi_sig_transactions) add_subdirectory(pending_txs_storage) add_subdirectory(util) +add_subdirectory(maintenance) diff --git a/irohad/ametsuchi/CMakeLists.txt b/irohad/ametsuchi/CMakeLists.txt index fa0415babb9..d624204deca 100644 --- a/irohad/ametsuchi/CMakeLists.txt +++ b/irohad/ametsuchi/CMakeLists.txt @@ -23,6 +23,19 @@ target_link_libraries(pool_wrapper SOCI::core ) +add_library(rocksdb_block_storage + impl/rocksdb_block_storage.cpp + impl/rocksdb_block_storage_factory.cpp + ) + +target_link_libraries(rocksdb_block_storage + libs_files + shared_model_proto_backend + logger + Boost::boost + RocksDB::rocksdb + ) + add_library(flat_file_storage impl/flat_file/flat_file.cpp impl/flat_file_block_storage.cpp @@ -76,12 +89,26 @@ 
target_link_libraries(proto_command_executor shared_model_stateless_validation ) +add_library(query_executor_base + impl/query_executor_base.cpp + ) +target_link_libraries(query_executor_base + logger + ) + +add_library(rocksdb_query_executor + impl/rocksdb_query_executor.cpp + ) +target_link_libraries(rocksdb_query_executor + query_executor_base + ) + add_library(postgres_query_executor impl/postgres_query_executor.cpp ) target_link_libraries(postgres_query_executor SOCI::core - logger + query_executor_base ) target_compile_definitions(postgres_query_executor PRIVATE SOCI_USE_BOOST HAVE_BOOST @@ -92,13 +119,19 @@ add_library(proto_specific_query_executor ) target_link_libraries(proto_specific_query_executor postgres_query_executor + rocksdb_query_executor shared_model_proto_backend shared_model_stateless_validation ) add_library(ametsuchi + impl/storage_base.cpp impl/storage_impl.cpp + impl/rocksdb_storage_impl.cpp + impl/rocksdb_temporary_wsv_impl.cpp + impl/block_query_base.cpp impl/temporary_wsv_impl.cpp + impl/postgres_temporary_wsv_impl.cpp impl/mutable_storage_impl.cpp impl/postgres_wsv_query.cpp impl/postgres_wsv_command.cpp @@ -106,28 +139,52 @@ add_library(ametsuchi impl/postgres_block_query.cpp impl/setting_query.cpp impl/postgres_setting_query.cpp + impl/rocksdb_settings_query.cpp + impl/rocksdb_block_query.cpp impl/executor_common.cpp + impl/command_executor.cpp impl/postgres_command_executor.cpp impl/wsv_restorer_impl.cpp impl/postgres_specific_query_executor.cpp impl/tx_presence_cache_impl.cpp + ) + +add_library(im_memory_block_storage impl/in_memory_block_storage.cpp impl/in_memory_block_storage_factory.cpp ) +target_link_libraries(im_memory_block_storage + common + logger + ) -add_library(postgres_indexer - impl/postgres_indexer.cpp - impl/postgres_block_index.cpp +add_library(block_indexer + impl/block_index_impl.cpp ) -target_link_libraries(postgres_indexer +target_link_libraries(block_indexer common logger shared_model_interfaces 
shared_model_cryptography + ) + +add_library(postgres_indexer + impl/postgres_indexer.cpp + ) +target_link_libraries(postgres_indexer + block_indexer SOCI::postgresql SOCI::core ) +add_library(rocksdb_indexer + impl/rocksdb_indexer.cpp + ) +target_link_libraries(rocksdb_indexer + block_indexer + RocksDB::rocksdb + ) + target_compile_definitions(postgres_indexer PRIVATE SOCI_USE_BOOST HAVE_BOOST ) @@ -154,12 +211,14 @@ endif() target_link_libraries(ametsuchi default_vm_call pg_connection_init + rdb_connection_init + ametsuchi_rocksdb flat_file_storage + rocksdb_block_storage postgres_indexer postgres_storage logger logger_manager - rxcpp libs_files common postgres_burrow_storage @@ -172,8 +231,31 @@ target_link_libraries(ametsuchi SOCI::postgresql SOCI::core postgres_query_executor + rocksdb_query_executor + im_memory_block_storage ) target_compile_definitions(ametsuchi PRIVATE SOCI_USE_BOOST HAVE_BOOST ) +add_library(ametsuchi_rocksdb + impl/rocksdb_wsv_query.cpp + impl/rocksdb_wsv_command.cpp + impl/rocksdb_command_executor.cpp + impl/rocksdb_specific_query_executor.cpp + ) +target_link_libraries(ametsuchi_rocksdb + RocksDB::rocksdb + logger + logger_manager + libs_files + common + Boost::boost + Boost::filesystem + im_memory_block_storage + shared_model_proto_backend + shared_model_plain_backend + shared_model_interfaces_factories + rocksdb_indexer + shared_model_interfaces + ) diff --git a/irohad/ametsuchi/block_query.hpp b/irohad/ametsuchi/block_query.hpp index 2c28fb1f9c2..a256abe1eff 100644 --- a/irohad/ametsuchi/block_query.hpp +++ b/irohad/ametsuchi/block_query.hpp @@ -50,6 +50,11 @@ namespace iroha { virtual shared_model::interface::types::HeightType getTopBlockHeight() = 0; + /** + * Reloads blockstore + */ + virtual void reloadBlockstore() = 0; + /** * Synchronously checks whether transaction with given hash is present in * any block diff --git a/irohad/ametsuchi/block_storage.hpp b/irohad/ametsuchi/block_storage.hpp index 0ce747b7393..53eb2ac8184 100644 
--- a/irohad/ametsuchi/block_storage.hpp +++ b/irohad/ametsuchi/block_storage.hpp @@ -6,11 +6,11 @@ #ifndef IROHA_BLOCK_STORAGE_HPP #define IROHA_BLOCK_STORAGE_HPP +#include #include #include #include -#include #include "common/result_fwd.hpp" #include "interfaces/iroha_internal/block.hpp" @@ -41,6 +41,11 @@ namespace iroha { */ virtual size_t size() const = 0; + /** + * Reloads blocks in case their were modified externally + */ + virtual void reload() = 0; + /** * Clears the contents of storage */ diff --git a/irohad/ametsuchi/command_executor.hpp b/irohad/ametsuchi/command_executor.hpp index 2c6606a2cae..a0b654926a7 100644 --- a/irohad/ametsuchi/command_executor.hpp +++ b/irohad/ametsuchi/command_executor.hpp @@ -6,6 +6,7 @@ #ifndef IROHA_AMETSUCHI_COMMAND_EXECUTOR_HPP #define IROHA_AMETSUCHI_COMMAND_EXECUTOR_HPP +#include "ametsuchi/impl/db_transaction.hpp" #include "common/result.hpp" #include "interfaces/common_objects/types.hpp" @@ -25,6 +26,10 @@ namespace iroha { struct CommandError { using ErrorCodeType = uint32_t; + CommandError(std::string_view command_name, + ErrorCodeType error_code, + std::string_view error_extra); + std::string command_name; ErrorCodeType error_code; std::string error_extra; @@ -51,6 +56,10 @@ namespace iroha { const std::string &tx_hash, shared_model::interface::types::CommandIndexType cmd_index, bool do_validation) = 0; + + virtual void skipChanges() = 0; + + virtual DatabaseTransaction &dbSession() = 0; }; } // namespace ametsuchi } // namespace iroha diff --git a/irohad/ametsuchi/impl/postgres_block_index.cpp b/irohad/ametsuchi/impl/block_index_impl.cpp similarity index 86% rename from irohad/ametsuchi/impl/postgres_block_index.cpp rename to irohad/ametsuchi/impl/block_index_impl.cpp index 7af2a940894..74693a6c7ad 100644 --- a/irohad/ametsuchi/impl/postgres_block_index.cpp +++ b/irohad/ametsuchi/impl/block_index_impl.cpp @@ -3,7 +3,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include "ametsuchi/impl/postgres_block_index.hpp" 
+#include "ametsuchi/impl/block_index_impl.hpp" #include #include @@ -38,7 +38,7 @@ namespace { // Collect all assets belonging to creator, sender, and receiver // to make account_id:height:asset_id -> list of tx indexes // for transfer asset in command -void PostgresBlockIndex::makeAccountAssetIndex( +void BlockIndexImpl::makeAccountAssetIndex( const AccountIdType &account_id, shared_model::interface::types::HashType const &hash, shared_model::interface::types::TimestampType const ts, @@ -67,17 +67,18 @@ void PostgresBlockIndex::makeAccountAssetIndex( } } -PostgresBlockIndex::PostgresBlockIndex(std::unique_ptr indexer, - logger::LoggerPtr log) +BlockIndexImpl::BlockIndexImpl(std::unique_ptr indexer, + logger::LoggerPtr log) : indexer_(std::move(indexer)), log_(std::move(log)) {} -void PostgresBlockIndex::index(const shared_model::interface::Block &block) { +void BlockIndexImpl::index(const shared_model::interface::Block &block) { auto height = block.height(); for (auto tx : block.transactions() | boost::adaptors::indexed(0)) { const auto &creator_id = tx.value().creatorAccountId(); const TxPosition position{height, static_cast(tx.index())}; - indexer_->committedTxHash(tx.value().hash()); + indexer_->committedTxHash( + position, tx.value().createdTime(), tx.value().hash()); makeAccountAssetIndex(creator_id, tx.value().hash(), tx.value().createdTime(), @@ -90,8 +91,9 @@ void PostgresBlockIndex::index(const shared_model::interface::Block &block) { position); } + const TxPosition position{height, static_cast(0ull)}; for (const auto &rejected_tx_hash : block.rejected_transactions_hashes()) { - indexer_->rejectedTxHash(rejected_tx_hash); + indexer_->rejectedTxHash(position, 0ull, rejected_tx_hash); } if (auto e = resultToOptionalError(indexer_->flush())) { diff --git a/irohad/ametsuchi/impl/postgres_block_index.hpp b/irohad/ametsuchi/impl/block_index_impl.hpp similarity index 90% rename from irohad/ametsuchi/impl/postgres_block_index.hpp rename to 
irohad/ametsuchi/impl/block_index_impl.hpp index 4e9561abf4d..3ef40a26642 100644 --- a/irohad/ametsuchi/impl/postgres_block_index.hpp +++ b/irohad/ametsuchi/impl/block_index_impl.hpp @@ -27,10 +27,9 @@ namespace iroha { * 2. account -> block for source and destination accounts * 3. (account, height) -> list of txes */ - class PostgresBlockIndex : public BlockIndex { + class BlockIndexImpl : public BlockIndex { public: - PostgresBlockIndex(std::unique_ptr indexer, - logger::LoggerPtr log); + BlockIndexImpl(std::unique_ptr indexer, logger::LoggerPtr log); /// Index a block. void index(const shared_model::interface::Block &block) override; diff --git a/irohad/ametsuchi/impl/block_query_base.cpp b/irohad/ametsuchi/impl/block_query_base.cpp new file mode 100644 index 00000000000..4e78d35fb3f --- /dev/null +++ b/irohad/ametsuchi/impl/block_query_base.cpp @@ -0,0 +1,54 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/block_query_base.hpp" + +#include "common/byteutils.hpp" +#include "common/cloneable.hpp" +#include "logger/logger.hpp" + +namespace iroha::ametsuchi { + + BlockQueryBase::BlockQueryBase(BlockStorage &block_storage, + logger::LoggerPtr log) + : block_storage_(block_storage), log_(std::move(log)) {} + + BlockQuery::BlockResult BlockQueryBase::getBlock( + shared_model::interface::types::HeightType height) { + auto block = block_storage_.fetch(height); + if (not block) { + return expected::makeError(GetBlockError{ + GetBlockError::Code::kNoBlock, + fmt::format("Failed to retrieve block with height {}", height)}); + } + return std::move(*block); + } + + shared_model::interface::types::HeightType + BlockQueryBase::getTopBlockHeight() { + return block_storage_.size(); + } + + void BlockQueryBase::reloadBlockstore() { + block_storage_.reload(); + } + + std::optional BlockQueryBase::checkTxPresence( + const shared_model::crypto::Hash &hash) { + int res = -1; + if (auto status = 
getTxStatus(hash); !status) + return std::nullopt; + else + res = *status; + + if (res > 0) + return tx_cache_status_responses::Committed{hash}; + else if (res == 0) + return tx_cache_status_responses::Rejected{hash}; + + return tx_cache_status_responses::Missing{hash}; + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/block_query_base.hpp b/irohad/ametsuchi/impl/block_query_base.hpp new file mode 100644 index 00000000000..9622dd23416 --- /dev/null +++ b/irohad/ametsuchi/impl/block_query_base.hpp @@ -0,0 +1,47 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_BLOCK_QUERY_BASE_HPP +#define IROHA_BLOCK_QUERY_BASE_HPP + +#include + +#include "ametsuchi/block_query.hpp" +#include "ametsuchi/block_storage.hpp" +#include "logger/logger_fwd.hpp" + +namespace iroha::ametsuchi { + + /** + * Class which implements BlockQuery. + */ + class BlockQueryBase : public BlockQuery { + public: + BlockQueryBase(BlockStorage &block_storage, logger::LoggerPtr log); + + BlockResult getBlock( + shared_model::interface::types::HeightType height) override; + + shared_model::interface::types::HeightType getTopBlockHeight() override; + + void reloadBlockstore() override; + + std::optional checkTxPresence( + const shared_model::crypto::Hash &hash) override; + + // res > 0 => Committed + // res == 0 => Rejected + // res < 0 => Missing + virtual std::optional getTxStatus( + const shared_model::crypto::Hash &hash) = 0; + + protected: + BlockStorage &block_storage_; + logger::LoggerPtr log_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_POSTGRES_BLOCK_QUERY_HPP diff --git a/irohad/ametsuchi/impl/command_executor.cpp b/irohad/ametsuchi/impl/command_executor.cpp new file mode 100644 index 00000000000..83289ffceaf --- /dev/null +++ b/irohad/ametsuchi/impl/command_executor.cpp @@ -0,0 +1,22 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/command_executor.hpp" + +#include + +using namespace iroha::ametsuchi; + +CommandError::CommandError(std::string_view command_name, + ErrorCodeType error_code, + std::string_view error_extra) + : command_name(command_name), + error_code(error_code), + error_extra(error_extra) {} + +std::string CommandError::toString() const { + return fmt::format( + "{}: {} with extra info '{}'", command_name, error_code, error_extra); +} diff --git a/irohad/ametsuchi/impl/db_transaction.hpp b/irohad/ametsuchi/impl/db_transaction.hpp new file mode 100644 index 00000000000..0353596b0db --- /dev/null +++ b/irohad/ametsuchi/impl/db_transaction.hpp @@ -0,0 +1,27 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_DB_TRANSACTION_HPP +#define IROHA_DB_TRANSACTION_HPP + +#include + +namespace iroha::ametsuchi { + + class DatabaseTransaction { + public: + virtual void begin() = 0; + virtual void savepoint(std::string const &name) = 0; + virtual void commit() = 0; + virtual void rollback() = 0; + virtual void rollbackToSavepoint(std::string const &name) = 0; + virtual void releaseSavepoint(std::string const &name) = 0; + virtual void prepare(std::string const &name) = 0; + virtual void commitPrepared(std::string const &name) = 0; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_DB_TRANSACTION_HPP diff --git a/irohad/ametsuchi/impl/executor_common.cpp b/irohad/ametsuchi/impl/executor_common.cpp index 38c1ef8c405..e0995d70641 100644 --- a/irohad/ametsuchi/impl/executor_common.cpp +++ b/irohad/ametsuchi/impl/executor_common.cpp @@ -5,8 +5,8 @@ #include "ametsuchi/impl/executor_common.hpp" -#include -#include +#include + #include "interfaces/permissions.hpp" namespace iroha { @@ -17,12 +17,32 @@ namespace iroha { {shared_model::interface::permissions::Role::kRoot}) .toBitstring()}; - shared_model::interface::types::DomainIdType getDomainFromName( 
- const shared_model::interface::types::AccountIdType &account_id) { + std::string_view getDomainFromName(std::string_view account_id) { // TODO 03.10.18 andrei: IR-1728 Move getDomainFromName to shared_model - std::vector res; - boost::split(res, account_id, boost::is_any_of("@")); - return res.at(1); + return splitId(account_id).at(1); + } + + std::vector splitId(std::string_view id) { + return split(id, "@#"); + } + + std::vector split(std::string_view str, + std::string_view delims) { + std::vector output; + + for (auto first = str.data(), + second = str.data(), + last = first + str.size(); + second != last && first != last; + first = second + 1) { + second = std::find_first_of( + first, last, std::cbegin(delims), std::cend(delims)); + + if (first != second) + output.emplace_back(first, second - first); + } + + return output; } } // namespace ametsuchi diff --git a/irohad/ametsuchi/impl/executor_common.hpp b/irohad/ametsuchi/impl/executor_common.hpp index acc1f322655..ac67210aeee 100644 --- a/irohad/ametsuchi/impl/executor_common.hpp +++ b/irohad/ametsuchi/impl/executor_common.hpp @@ -8,15 +8,40 @@ #include "interfaces/common_objects/types.hpp" -namespace iroha { - namespace ametsuchi { +#include +#include - extern const std::string kRootRolePermStr; +namespace iroha::ametsuchi { - shared_model::interface::types::DomainIdType getDomainFromName( - const shared_model::interface::types::AccountIdType &account_id); + extern const std::string kRootRolePermStr; - } // namespace ametsuchi -} // namespace iroha + std::string_view getDomainFromName(std::string_view account_id); + + std::vector splitId(std::string_view id); + + std::vector split(std::string_view str, + std::string_view delims); + + template + std::array staticSplitId( + std::string_view const str, std::string_view const delims = "@#") { + std::array output; + + auto it_first = str.data(); + auto it_second = str.data(); + auto it_end = str.data() + str.size(); + size_t counter = 0; + + while (it_first != 
it_end && counter < C) { + it_second = std::find_first_of( + it_first, it_end, std::cbegin(delims), std::cend(delims)); + + output[counter++] = std::string_view(it_first, it_second - it_first); + it_first = it_second != it_end ? it_second + 1ull : it_end; + } + return output; + } + +} // namespace iroha::ametsuchi #endif // IROHA_AMETSUCHI_EXECUTOR_COMMON_HPP diff --git a/irohad/ametsuchi/impl/flat_file/flat_file.cpp b/irohad/ametsuchi/impl/flat_file/flat_file.cpp index 17f5966ea8c..7d6826933f1 100644 --- a/irohad/ametsuchi/impl/flat_file/flat_file.cpp +++ b/irohad/ametsuchi/impl/flat_file/flat_file.cpp @@ -5,29 +5,35 @@ #include "ametsuchi/impl/flat_file/flat_file.hpp" -#include -#include -#include -#include - #include #include #include #include #include +#include +#include +#include +#include + #include "common/files.hpp" #include "common/result.hpp" #include "logger/logger.hpp" #ifdef _WIN32 +// We skip format here because of strong including order +// clang-format off #include #include +// clang-format on #endif using namespace iroha::ametsuchi; using Identifier = FlatFile::Identifier; using BlockIdCollectionType = FlatFile::BlockIdCollectionType; +const std::string FlatFile::kTempFileExtension = ".tmp"; +const std::regex FlatFile::kBlockFilenameRegex = std::regex("[0-9]{16}"); + // ----------| public API |---------- std::string FlatFile::id_to_name(Identifier id) { @@ -57,28 +63,19 @@ FlatFile::create(const std::string &path, logger::LoggerPtr log) { "Cannot create storage dir '{}': {}", path, err.message()); } - BlockIdCollectionType files_found; - for (auto it = boost::filesystem::directory_iterator{path}; - it != boost::filesystem::directory_iterator{}; - ++it) { - if (auto id = FlatFile::name_to_id(it->path().filename().string())) { - files_found.insert(*id); - } else { - boost::filesystem::remove(it->path()); - } - } - - return std::make_unique( - path, std::move(files_found), private_tag{}, std::move(log)); + return std::make_unique(path, private_tag{}, 
std::move(log)); } bool FlatFile::add(Identifier id, const Bytes &block) { // TODO(x3medima17): Change bool to generic Result return type + const auto tmp_file_name = boost::filesystem::path{dump_dir_} + / (id_to_name(id) + kTempFileExtension); const auto file_name = boost::filesystem::path{dump_dir_} / id_to_name(id); // Write block to binary file - if (boost::filesystem::exists(file_name)) { + if (boost::filesystem::exists(tmp_file_name) + || boost::filesystem::exists(file_name)) { // File already exist log_->warn("insertion for {} failed, because file already exists", id); return false; @@ -86,7 +83,7 @@ bool FlatFile::add(Identifier id, const Bytes &block) { // New file will be created boost::iostreams::stream file; try { - file.open(file_name, std::ofstream::binary); + file.open(tmp_file_name, std::ofstream::binary); } catch (std::ios_base::failure const &e) { log_->warn("Cannot open file by index {} for writing: {}", id, e.what()); return false; @@ -119,6 +116,16 @@ bool FlatFile::add(Identifier id, const Bytes &block) { return false; } + file->close(); + + boost::system::error_code error_code; + boost::filesystem::rename(tmp_file_name, file_name, error_code); + if (error_code != boost::system::errc::success) { + log_->error( + "insertion for {} failed, because {}", id, error_code.message()); + return false; + } + available_blocks_.insert(id); return true; } @@ -142,6 +149,24 @@ Identifier FlatFile::last_id() const { return (available_blocks_.empty()) ? 
0 : *available_blocks_.rbegin(); } +void FlatFile::reload() { + available_blocks_.clear(); + for (auto it = boost::filesystem::directory_iterator{dump_dir_}; + it != boost::filesystem::directory_iterator{}; + ++it) { + // skip non-block files + if (!std::regex_match(it->path().filename().string(), + kBlockFilenameRegex)) { + continue; + } + if (auto id = FlatFile::name_to_id(it->path().filename().string())) { + available_blocks_.insert(*id); + } else { + boost::filesystem::remove(it->path()); + } + } +} + void FlatFile::dropAll() { iroha::remove_dir_contents(dump_dir_, log_); available_blocks_.clear(); @@ -154,9 +179,8 @@ const BlockIdCollectionType &FlatFile::blockIdentifiers() const { // ----------| private API |---------- FlatFile::FlatFile(std::string path, - BlockIdCollectionType existing_files, FlatFile::private_tag, logger::LoggerPtr log) - : dump_dir_(std::move(path)), - available_blocks_(std::move(existing_files)), - log_{std::move(log)} {} + : dump_dir_(std::move(path)), log_{std::move(log)} { + reload(); +} diff --git a/irohad/ametsuchi/impl/flat_file/flat_file.hpp b/irohad/ametsuchi/impl/flat_file/flat_file.hpp index 46f35474c21..726317087db 100644 --- a/irohad/ametsuchi/impl/flat_file/flat_file.hpp +++ b/irohad/ametsuchi/impl/flat_file/flat_file.hpp @@ -6,11 +6,11 @@ #ifndef IROHA_FLAT_FILE_HPP #define IROHA_FLAT_FILE_HPP -#include "ametsuchi/key_value_storage.hpp" - #include +#include #include +#include "ametsuchi/key_value_storage.hpp" #include "common/result_fwd.hpp" #include "logger/logger_fwd.hpp" @@ -34,6 +34,10 @@ namespace iroha { static const uint32_t DIGIT_CAPACITY = 16; + static const std::string kTempFileExtension; + + static const std::regex kBlockFilenameRegex; + /** * Convert id to a string representation. The string representation is * always DIGIT_CAPACITY-character width regardless of the value of `id`. 
@@ -72,6 +76,8 @@ namespace iroha { Identifier last_id() const override; + void reload() override; + void dropAll() override; /** @@ -94,11 +100,9 @@ namespace iroha { /** * Create storage in path * @param path - folder of storage - * @param existing_files - collection of existing files names * @param log to print progress */ FlatFile(std::string path, - BlockIdCollectionType existing_files, FlatFile::private_tag, logger::LoggerPtr log); @@ -108,6 +112,9 @@ namespace iroha { */ const std::string dump_dir_; + /** + * Blocks in storage, can be modified externally + */ BlockIdCollectionType available_blocks_; logger::LoggerPtr log_; diff --git a/irohad/ametsuchi/impl/flat_file_block_storage.cpp b/irohad/ametsuchi/impl/flat_file_block_storage.cpp index fc63b50e0e9..54c593f28f7 100644 --- a/irohad/ametsuchi/impl/flat_file_block_storage.cpp +++ b/irohad/ametsuchi/impl/flat_file_block_storage.cpp @@ -58,7 +58,11 @@ FlatFileBlockStorage::fetch( } size_t FlatFileBlockStorage::size() const { - return flat_file_storage_->blockIdentifiers().size(); + return flat_file_storage_->last_id(); +} + +void FlatFileBlockStorage::reload() { + flat_file_storage_->reload(); } void FlatFileBlockStorage::clear() { diff --git a/irohad/ametsuchi/impl/flat_file_block_storage.hpp b/irohad/ametsuchi/impl/flat_file_block_storage.hpp index 968b28d6af2..3f71bf04cc7 100644 --- a/irohad/ametsuchi/impl/flat_file_block_storage.hpp +++ b/irohad/ametsuchi/impl/flat_file_block_storage.hpp @@ -30,6 +30,8 @@ namespace iroha { size_t size() const override; + void reload() override; + void clear() override; expected::Result forEach( diff --git a/irohad/ametsuchi/impl/in_memory_block_storage.cpp b/irohad/ametsuchi/impl/in_memory_block_storage.cpp index e6bfc9fbc0e..126c054e239 100644 --- a/irohad/ametsuchi/impl/in_memory_block_storage.cpp +++ b/irohad/ametsuchi/impl/in_memory_block_storage.cpp @@ -27,6 +27,9 @@ InMemoryBlockStorage::fetch( size_t InMemoryBlockStorage::size() const { return block_store_.size(); 
} +void InMemoryBlockStorage::reload() { + // no need to reload +} void InMemoryBlockStorage::clear() { block_store_.clear(); diff --git a/irohad/ametsuchi/impl/in_memory_block_storage.hpp b/irohad/ametsuchi/impl/in_memory_block_storage.hpp index 1127a859bf4..4f0011b874b 100644 --- a/irohad/ametsuchi/impl/in_memory_block_storage.hpp +++ b/irohad/ametsuchi/impl/in_memory_block_storage.hpp @@ -26,6 +26,8 @@ namespace iroha { size_t size() const override; + void reload() override; + void clear() override; expected::Result forEach( diff --git a/irohad/ametsuchi/impl/mutable_storage_impl.cpp b/irohad/ametsuchi/impl/mutable_storage_impl.cpp index 0695ee93092..0a9e82c1236 100644 --- a/irohad/ametsuchi/impl/mutable_storage_impl.cpp +++ b/irohad/ametsuchi/impl/mutable_storage_impl.cpp @@ -7,11 +7,10 @@ #include #include -#include #include #include "ametsuchi/command_executor.hpp" +#include "ametsuchi/impl/block_index_impl.hpp" #include "ametsuchi/impl/peer_query_wsv.hpp" -#include "ametsuchi/impl/postgres_block_index.hpp" #include "ametsuchi/impl/postgres_command_executor.hpp" #include "ametsuchi/impl/postgres_indexer.hpp" #include "ametsuchi/impl/postgres_wsv_command.hpp" @@ -23,164 +22,149 @@ #include "logger/logger.hpp" #include "logger/logger_manager.hpp" -namespace iroha { - namespace ametsuchi { - MutableStorageImpl::MutableStorageImpl( - boost::optional> ledger_state, - std::shared_ptr command_executor, - std::unique_ptr block_storage, - logger::LoggerManagerTreePtr log_manager) - : ledger_state_(std::move(ledger_state)), - sql_(command_executor->getSession()), - wsv_command_(std::make_unique(sql_)), - peer_query_( - std::make_unique(std::make_shared( - sql_, log_manager->getChild("WsvQuery")->getLogger()))), - block_index_(std::make_unique( - std::make_unique(sql_), - log_manager->getChild("PostgresBlockIndex")->getLogger())), - transaction_executor_(std::make_unique( - std::move(command_executor))), - block_storage_(std::move(block_storage)), - committed(false), - 
log_(log_manager->getLogger()) { - sql_ << "BEGIN"; - } - - bool MutableStorageImpl::apply( - std::shared_ptr block, - MutableStoragePredicate predicate) { - auto execute_transaction = [this](auto &transaction) -> bool { - auto result = transaction_executor_->execute(transaction, false); - auto error = expected::resultToOptionalError(result); - if (error) { - log_->error(error->command_error.toString()); - } - auto ok = !error; - return ok; - }; - - log_->info("Applying block: height {}, hash {}", - block->height(), - block->hash().hex()); - - auto block_applied = - (not ledger_state_ or predicate(block, *ledger_state_.value())) - and std::all_of(block->transactions().begin(), - block->transactions().end(), - execute_transaction); - if (block_applied) { - if (auto e = - expected::resultToOptionalError(wsv_command_->setTopBlockInfo( - TopBlockInfo{block->height(), block->hash()}))) { - log_->error("{}", e.value()); - return false; - } - - block_storage_->insert(block); - block_index_->index(*block); - - auto opt_ledger_peers = peer_query_->getLedgerPeers(); - if (not opt_ledger_peers) { - log_->error("Failed to get ledger peers!"); - return false; - } - - ledger_state_ = std::make_shared( - std::move(*opt_ledger_peers), block->height(), block->hash()); +namespace iroha::ametsuchi { + + MutableStorageImpl::MutableStorageImpl( + boost::optional> ledger_state, + std::unique_ptr wsv_command, + std::unique_ptr peer_query, + std::unique_ptr block_index, + std::shared_ptr command_executor, + std::unique_ptr block_storage, + logger::LoggerManagerTreePtr log_manager) + : ledger_state_(std::move(ledger_state)), + db_tx_(command_executor->dbSession()), + wsv_command_(std::move(wsv_command)), + peer_query_(std::move(peer_query)), + block_index_(std::move(block_index)), + transaction_executor_( + std::make_unique(std::move(command_executor))), + block_storage_(std::move(block_storage)), + committed(false), + log_(log_manager->getLogger()) { + db_tx_.begin(); + } + + bool 
MutableStorageImpl::applyBlockIf( + std::shared_ptr block, + MutableStoragePredicate predicate) { + auto execute_transaction = [this](auto &transaction) -> bool { + auto result = transaction_executor_->execute(transaction, false); + auto error = expected::resultToOptionalError(result); + if (error) { + log_->error(error->command_error.toString()); + } + auto ok = !error; + return ok; + }; + + log_->info("Applying block: height {}, hash {}", + block->height(), + block->hash().hex()); + + auto block_applied = + (not ledger_state_ or predicate(block, *ledger_state_.value())) + and std::all_of(block->transactions().begin(), + block->transactions().end(), + execute_transaction); + if (block_applied) { + if (auto e = + expected::resultToOptionalError(wsv_command_->setTopBlockInfo( + TopBlockInfo{block->height(), block->hash()}))) { + log_->error("{}", e.value()); + return false; } - return block_applied; - } - - template - bool MutableStorageImpl::withSavepoint(Function &&function) { - try { - sql_ << "SAVEPOINT savepoint_"; - - auto function_executed = std::forward(function)(); + block_storage_->insert(block); + block_index_->index(*block); - if (function_executed) { - sql_ << "RELEASE SAVEPOINT savepoint_"; - } else { - sql_ << "ROLLBACK TO SAVEPOINT savepoint_"; - } - return function_executed; - } catch (std::exception &e) { - log_->warn("Apply has failed. 
Reason: {}", e.what()); + auto opt_ledger_peers = peer_query_->getLedgerPeers(); + if (not opt_ledger_peers) { + log_->error("Failed to get ledger peers!"); return false; } - } - bool MutableStorageImpl::apply( - std::shared_ptr block) { - return withSavepoint([&] { - return this->apply(block, [](const auto &, auto &) { return true; }); - }); + ledger_state_ = std::make_shared( + std::move(*opt_ledger_peers), block->height(), block->hash()); } - bool MutableStorageImpl::apply( - rxcpp::observable> - blocks, - MutableStoragePredicate predicate) { - try { - return blocks - .all([&](auto block) { - return withSavepoint( - [&] { return this->apply(block, predicate); }); - }) - .as_blocking() - .first(); - } catch (std::runtime_error const &e) { - log_->warn("Apply has been failed: {}", e.what()); - return false; + return block_applied; + } + + template + bool MutableStorageImpl::withSavepoint(Function &&function) { + try { + db_tx_.savepoint("savepoint_"); + auto function_executed = std::forward(function)(); + + if (function_executed) { + db_tx_.releaseSavepoint("savepoint_"); + } else { + db_tx_.rollbackToSavepoint("savepoint_"); } + return function_executed; + } catch (std::exception &e) { + log_->warn("Apply has failed. 
Reason: {}", e.what()); + return false; } - - boost::optional> - MutableStorageImpl::getLedgerState() const { - return ledger_state_; + } + + bool MutableStorageImpl::apply( + std::shared_ptr block) { + return withSavepoint([&] { + return this->applyBlockIf(block, + [](const auto &, auto &) { return true; }); + }); + } + + bool MutableStorageImpl::applyIf( + std::shared_ptr block, + MutableStoragePredicate predicate) { + return withSavepoint([&] { return this->applyBlockIf(block, predicate); }); + } + + boost::optional> + MutableStorageImpl::getLedgerState() const { + return ledger_state_; + } + + expected::Result + MutableStorageImpl::commit(BlockStorage &block_storage) && { + if (committed) { + assert(not committed); + return "Tried to commit mutable storage twice."; } - - expected::Result - MutableStorageImpl::commit(BlockStorage &block_storage) && { - if (committed) { - assert(not committed); - return "Tried to commit mutable storage twice."; + if (not ledger_state_) { + assert(ledger_state_); + return "Tried to commit mutable storage with no blocks applied."; + } + return block_storage_->forEach([&block_storage](auto const &block) + -> expected::Result { + if (not block_storage.insert(block)) { + return fmt::format("Failed to insert block {}", *block); } - if (not ledger_state_) { - assert(ledger_state_); - return "Tried to commit mutable storage with no blocks applied."; + return {}; + }) | [this]() + -> expected::Result { + try { + db_tx_.commit(); + committed = true; + } catch (std::exception &e) { + return expected::makeError(e.what()); } - return block_storage_->forEach( - [&block_storage]( - auto const &block) -> expected::Result { - if (not block_storage.insert(block)) { - return fmt::format("Failed to insert block {}", *block); - } - return {}; - }) - | [this]() -> expected::Result { - try { - sql_ << "COMMIT"; - committed = true; - } catch (std::exception &e) { - return expected::makeError(e.what()); - } - return 
MutableStorage::CommitResult{ledger_state_.value(), - std::move(block_storage_)}; - }; - } + return MutableStorage::CommitResult{ledger_state_.value(), + std::move(block_storage_)}; + }; + } - MutableStorageImpl::~MutableStorageImpl() { - if (not committed) { - try { - sql_ << "ROLLBACK"; - } catch (std::exception &e) { - log_->warn("Apply has been failed. Reason: {}", e.what()); - } + MutableStorageImpl::~MutableStorageImpl() { + if (not committed) { + try { + db_tx_.rollback(); + } catch (std::exception &e) { + log_->warn("Apply has been failed. Reason: {}", e.what()); } } - } // namespace ametsuchi -} // namespace iroha + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/mutable_storage_impl.hpp b/irohad/ametsuchi/impl/mutable_storage_impl.hpp index aac7a3b138f..7532b600886 100644 --- a/irohad/ametsuchi/impl/mutable_storage_impl.hpp +++ b/irohad/ametsuchi/impl/mutable_storage_impl.hpp @@ -10,76 +10,79 @@ #include #include "ametsuchi/block_storage.hpp" +#include "ametsuchi/impl/db_transaction.hpp" #include "common/result.hpp" #include "interfaces/common_objects/types.hpp" #include "logger/logger_fwd.hpp" #include "logger/logger_manager_fwd.hpp" -namespace iroha { - namespace ametsuchi { - class BlockIndex; - class PeerQuery; - class PostgresCommandExecutor; - class PostgresWsvCommand; - class TransactionExecutor; - - class MutableStorageImpl : public MutableStorage { - friend class StorageImpl; - - public: - MutableStorageImpl( - boost::optional> - ledger_state, - std::shared_ptr command_executor, - std::unique_ptr block_storage, - logger::LoggerManagerTreePtr log_manager); - - bool apply( - std::shared_ptr block) override; - - bool apply(rxcpp::observable< - std::shared_ptr> blocks, +namespace iroha::ametsuchi { + + class BlockIndex; + class PeerQuery; + class CommandExecutor; + class WsvCommand; + class TransactionExecutor; + + class MutableStorageImpl : public MutableStorage { + friend class StorageImpl; + + public: + MutableStorageImpl( 
+ boost::optional> ledger_state, + std::unique_ptr wsv_command, + std::unique_ptr peer_query, + std::unique_ptr block_index, + std::shared_ptr command_executor, + std::unique_ptr block_storage, + logger::LoggerManagerTreePtr log_manager); + + bool apply( + std::shared_ptr block) override; + + bool applyIf(std::shared_ptr block, MutableStoragePredicate predicate) override; - boost::optional> - getLedgerState() const; - - expected::Result commit( - BlockStorage &block_storage) - && override; - - ~MutableStorageImpl() override; - - private: - /** - * Performs a function inside savepoint, does a rollback if function - * returned false, and removes the savepoint otherwise. Returns function - * result - */ - template - bool withSavepoint(Function &&function); - - /** - * Verifies whether the block is applicable using predicate, and applies - * the block - */ - bool apply(std::shared_ptr block, - MutableStoragePredicate predicate); - - boost::optional> ledger_state_; - - soci::session &sql_; - std::unique_ptr wsv_command_; - std::unique_ptr peer_query_; - std::unique_ptr block_index_; - std::shared_ptr transaction_executor_; - std::unique_ptr block_storage_; - - bool committed; - - logger::LoggerPtr log_; - }; - } // namespace ametsuchi -} // namespace iroha + boost::optional> getLedgerState() + const; + + expected::Result commit( + BlockStorage &block_storage) + && override; + + ~MutableStorageImpl() override; + + private: + /** + * Performs a function inside savepoint, does a rollback if function + * returned false, and removes the savepoint otherwise. 
Returns function + * result + */ + template + bool withSavepoint(Function &&function); + + /** + * Verifies whether the block is applicable using predicate, and applies + * the block + */ + bool applyBlockIf( + std::shared_ptr block, + MutableStoragePredicate predicate); + + boost::optional> ledger_state_; + + DatabaseTransaction &db_tx_; + std::unique_ptr wsv_command_; + std::unique_ptr peer_query_; + std::unique_ptr block_index_; + std::shared_ptr transaction_executor_; + std::unique_ptr block_storage_; + + bool committed; + + logger::LoggerPtr log_; + }; + +} // namespace iroha::ametsuchi #endif // IROHA_MUTABLE_STORAGE_IMPL_HPP diff --git a/irohad/ametsuchi/impl/postgres_block_query.cpp b/irohad/ametsuchi/impl/postgres_block_query.cpp index cc27326dc3c..bfed0076771 100644 --- a/irohad/ametsuchi/impl/postgres_block_query.cpp +++ b/irohad/ametsuchi/impl/postgres_block_query.cpp @@ -12,64 +12,34 @@ #include "common/cloneable.hpp" #include "logger/logger.hpp" -namespace iroha { - namespace ametsuchi { - PostgresBlockQuery::PostgresBlockQuery(soci::session &sql, - BlockStorage &block_storage, - logger::LoggerPtr log) - : sql_(sql), block_storage_(block_storage), log_(std::move(log)) {} - - PostgresBlockQuery::PostgresBlockQuery(std::unique_ptr sql, - BlockStorage &block_storage, - logger::LoggerPtr log) - : psql_(std::move(sql)), - sql_(*psql_), - block_storage_(block_storage), - log_(std::move(log)) {} - - BlockQuery::BlockResult PostgresBlockQuery::getBlock( - shared_model::interface::types::HeightType height) { - auto block = block_storage_.fetch(height); - if (not block) { - auto error = - boost::format("Failed to retrieve block with height %d") % height; - return expected::makeError( - GetBlockError{GetBlockError::Code::kNoBlock, error.str()}); - } - return std::move(*block); - } - - shared_model::interface::types::HeightType - PostgresBlockQuery::getTopBlockHeight() { - return block_storage_.size(); +namespace iroha::ametsuchi { + + 
PostgresBlockQuery::PostgresBlockQuery(soci::session &sql, + BlockStorage &block_storage, + logger::LoggerPtr log) + : BlockQueryBase(block_storage, std::move(log)), sql_(sql) {} + + PostgresBlockQuery::PostgresBlockQuery(std::unique_ptr sql, + BlockStorage &block_storage, + logger::LoggerPtr log) + : BlockQueryBase(block_storage, std::move(log)), + psql_(std::move(sql)), + sql_(*psql_) {} + + std::optional PostgresBlockQuery::getTxStatus( + const shared_model::crypto::Hash &hash) { + int res = -1; + const auto &hash_str = hash.hex(); + + try { + sql_ << "SELECT status FROM tx_status_by_hash WHERE hash = :hash", + soci::into(res), soci::use(hash_str); + } catch (const std::exception &e) { + log_->error("Failed to execute query: {}", e.what()); + return std::nullopt; } - std::optional PostgresBlockQuery::checkTxPresence( - const shared_model::crypto::Hash &hash) { - int res = -1; - const auto &hash_str = hash.hex(); - - try { - sql_ << "SELECT status FROM tx_status_by_hash WHERE hash = :hash", - soci::into(res), soci::use(hash_str); - } catch (const std::exception &e) { - log_->error("Failed to execute query: {}", e.what()); - return std::nullopt; - } - - // res > 0 => Committed - // res == 0 => Rejected - // res < 0 => Missing - if (res > 0) { - return std::make_optional( - tx_cache_status_responses::Committed{hash}); - } else if (res == 0) { - return std::make_optional( - tx_cache_status_responses::Rejected{hash}); - } - return std::make_optional( - tx_cache_status_responses::Missing{hash}); - } + return res; + } - } // namespace ametsuchi -} // namespace iroha +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/postgres_block_query.hpp b/irohad/ametsuchi/impl/postgres_block_query.hpp index a2e0c31359a..d98b6008c5f 100644 --- a/irohad/ametsuchi/impl/postgres_block_query.hpp +++ b/irohad/ametsuchi/impl/postgres_block_query.hpp @@ -6,43 +6,33 @@ #ifndef IROHA_POSTGRES_BLOCK_QUERY_HPP #define IROHA_POSTGRES_BLOCK_QUERY_HPP -#include 
"ametsuchi/block_query.hpp" - #include -#include "ametsuchi/block_storage.hpp" -#include "logger/logger_fwd.hpp" - -namespace iroha { - namespace ametsuchi { - - /** - * Class which implements BlockQuery with a Postgres backend. - */ - class PostgresBlockQuery : public BlockQuery { - public: - PostgresBlockQuery(soci::session &sql, - BlockStorage &block_storage, - logger::LoggerPtr log); - - PostgresBlockQuery(std::unique_ptr sql, - BlockStorage &block_storage, - logger::LoggerPtr log); - - BlockResult getBlock( - shared_model::interface::types::HeightType height) override; - - shared_model::interface::types::HeightType getTopBlockHeight() override; - - std::optional checkTxPresence( - const shared_model::crypto::Hash &hash) override; - - private: - std::unique_ptr psql_; - soci::session &sql_; - BlockStorage &block_storage_; - logger::LoggerPtr log_; - }; - } // namespace ametsuchi -} // namespace iroha + +#include "ametsuchi/impl/block_query_base.hpp" + +namespace iroha::ametsuchi { + + /** + * Class which implements BlockQuery with a Postgres backend. 
+ */ + class PostgresBlockQuery : public BlockQueryBase { + public: + PostgresBlockQuery(soci::session &sql, + BlockStorage &block_storage, + logger::LoggerPtr log); + + PostgresBlockQuery(std::unique_ptr sql, + BlockStorage &block_storage, + logger::LoggerPtr log); + + std::optional getTxStatus( + const shared_model::crypto::Hash &hash) override; + + private: + std::unique_ptr psql_; + soci::session &sql_; + }; + +} // namespace iroha::ametsuchi #endif // IROHA_POSTGRES_BLOCK_QUERY_HPP diff --git a/irohad/ametsuchi/impl/postgres_block_storage.cpp b/irohad/ametsuchi/impl/postgres_block_storage.cpp index 4c1f89cbc76..ca03f12eb87 100644 --- a/irohad/ametsuchi/impl/postgres_block_storage.cpp +++ b/irohad/ametsuchi/impl/postgres_block_storage.cpp @@ -146,6 +146,10 @@ size_t PostgresBlockStorage::size() const { .value_or(0); } +void PostgresBlockStorage::reload() { + // no need to reload +} + void PostgresBlockStorage::clear() { soci::session sql(*pool_wrapper_->connection_pool_); soci::statement st = (sql.prepare << "TRUNCATE " << table_name_); diff --git a/irohad/ametsuchi/impl/postgres_block_storage.hpp b/irohad/ametsuchi/impl/postgres_block_storage.hpp index 77005cfe979..2bef03d382c 100644 --- a/irohad/ametsuchi/impl/postgres_block_storage.hpp +++ b/irohad/ametsuchi/impl/postgres_block_storage.hpp @@ -41,6 +41,8 @@ namespace iroha { size_t size() const override; + void reload() override; + void clear() override; expected::Result forEach( diff --git a/irohad/ametsuchi/impl/postgres_command_executor.cpp b/irohad/ametsuchi/impl/postgres_command_executor.cpp index 2d8f5bc57c5..e9f586fe0e3 100644 --- a/irohad/ametsuchi/impl/postgres_command_executor.cpp +++ b/irohad/ametsuchi/impl/postgres_command_executor.cpp @@ -19,6 +19,7 @@ #include "ametsuchi/impl/postgres_burrow_storage.hpp" #include "ametsuchi/impl/postgres_specific_query_executor.hpp" #include "ametsuchi/impl/soci_std_optional.hpp" +#include "ametsuchi/impl/soci_string_view.hpp" #include 
"ametsuchi/impl/soci_utils.hpp" #include "ametsuchi/setting_query.hpp" #include "ametsuchi/vm_caller.hpp" @@ -444,28 +445,16 @@ namespace iroha { } // TODO IR-597 mboldyrev 2019.08.10: build args string on demand - void addArgumentToString(const std::string &argument_name, - const std::string &value) { - arguments_string_builder_.appendNamed(argument_name, value); - } - - void addArgumentToString(const std::string &argument_name, - const boost::optional &value) { - if (value) { - addArgumentToString(argument_name, *value); - } - } - - void addArgumentToString(const std::string &argument_name, - const std::optional &value) { + void addArgumentToString(std::string_view argument_name, + const std::optional &value) { if (value) { - addArgumentToString(argument_name, *value); + arguments_string_builder_.appendNamed(argument_name, *value); } } template std::enable_if_t::value> addArgumentToString( - const std::string &argument_name, const T &value) { + std::string_view argument_name, const T &value) { addArgumentToString(argument_name, std::to_string(value)); } @@ -1356,6 +1345,7 @@ namespace iroha { ) SELECT CASE WHEN EXISTS (SELECT * FROM insert_dest LIMIT 1) THEN 0 + WHEN EXISTS (SELECT * FROM checks WHERE not result and code = 4) THEN 4 %s ELSE (SELECT code FROM checks WHERE not result ORDER BY code ASC LIMIT 1) END AS result)", @@ -1404,12 +1394,6 @@ namespace iroha { {}); } - std::string CommandError::toString() const { - return (boost::format("%s: %d with extra info '%s'") % command_name - % error_code % error_extra) - .str(); - } - PostgresCommandExecutor::PostgresCommandExecutor( std::unique_ptr sql, std::shared_ptr @@ -1417,6 +1401,7 @@ namespace iroha { std::shared_ptr specific_query_executor, std::optional> vm_caller) : sql_(std::move(sql)), + db_transaction_(*sql_), perm_converter_{std::move(perm_converter)}, specific_query_executor_{std::move(specific_query_executor)}, vm_caller_{std::move(vm_caller)} { @@ -1425,6 +1410,8 @@ namespace iroha { 
PostgresCommandExecutor::~PostgresCommandExecutor() = default; + void PostgresCommandExecutor::skipChanges() {} + CommandResult PostgresCommandExecutor::execute( const shared_model::interface::Command &cmd, const shared_model::interface::types::AccountIdType &creator_account_id, @@ -1444,6 +1431,10 @@ namespace iroha { return *sql_; } + DatabaseTransaction &PostgresCommandExecutor::dbSession() { + return db_transaction_; + } + CommandResult PostgresCommandExecutor::operator()( const shared_model::interface::AddAssetQuantity &command, const shared_model::interface::types::AccountIdType &creator_account_id, diff --git a/irohad/ametsuchi/impl/postgres_command_executor.hpp b/irohad/ametsuchi/impl/postgres_command_executor.hpp index 5a666960843..fbec9a3c8f7 100644 --- a/irohad/ametsuchi/impl/postgres_command_executor.hpp +++ b/irohad/ametsuchi/impl/postgres_command_executor.hpp @@ -9,6 +9,7 @@ #include #include "ametsuchi/command_executor.hpp" +#include "ametsuchi/impl/postgres_db_transaction.hpp" #include "ametsuchi/impl/soci_utils.hpp" namespace soci { @@ -67,6 +68,10 @@ namespace iroha { shared_model::interface::types::CommandIndexType cmd_index, bool do_validation) override; + void skipChanges() override; + + DatabaseTransaction &dbSession() override; + soci::session &getSession(); CommandResult operator()( @@ -241,6 +246,7 @@ namespace iroha { const std::vector &permission_checks); std::unique_ptr sql_; + PostgresDbTransaction db_transaction_; std::shared_ptr perm_converter_; diff --git a/irohad/ametsuchi/impl/postgres_db_transaction.hpp b/irohad/ametsuchi/impl/postgres_db_transaction.hpp new file mode 100644 index 00000000000..5873b9f79d6 --- /dev/null +++ b/irohad/ametsuchi/impl/postgres_db_transaction.hpp @@ -0,0 +1,63 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_POSTGRES_DB_TRANSACTION_HPP +#define IROHA_POSTGRES_DB_TRANSACTION_HPP + +#include "ametsuchi/impl/db_transaction.hpp" + +#include + +namespace iroha::ametsuchi { + + class PostgresDbTransaction final : public DatabaseTransaction { + public: + PostgresDbTransaction(PostgresDbTransaction const &) = delete; + PostgresDbTransaction(PostgresDbTransaction &&) = delete; + + PostgresDbTransaction &operator=(PostgresDbTransaction const &) = delete; + PostgresDbTransaction &operator=(PostgresDbTransaction &&) = delete; + + PostgresDbTransaction(soci::session &sql) : sql_(sql) {} + + void begin() override { + sql_ << "BEGIN"; + } + + void prepare(std::string const &name) override { + sql_ << "PREPARE TRANSACTION '" + name + "';"; + } + + void commitPrepared(std::string const &name) override { + sql_ << "COMMIT PREPARED '" + name + "';"; + } + + void savepoint(std::string const &name) override { + sql_ << "SAVEPOINT " + name + ";"; + } + + void releaseSavepoint(std::string const &name) override { + sql_ << "RELEASE SAVEPOINT " + name + ";"; + } + + void commit() override { + sql_ << "COMMIT"; + } + + void rollback() override { + sql_ << "ROLLBACK"; + } + + void rollbackToSavepoint(std::string const &name) override { + sql_ << "ROLLBACK TO SAVEPOINT " + name + ";"; + } + + private: + soci::session &sql_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_POSTGRES_DB_TRANSACTION_HPP diff --git a/irohad/ametsuchi/impl/postgres_indexer.cpp b/irohad/ametsuchi/impl/postgres_indexer.cpp index 997065a823d..e5beb60c4a3 100644 --- a/irohad/ametsuchi/impl/postgres_indexer.cpp +++ b/irohad/ametsuchi/impl/postgres_indexer.cpp @@ -5,6 +5,7 @@ #include "ametsuchi/impl/postgres_indexer.hpp" +#include #include #include #include "cryptography/hash.hpp" @@ -19,11 +20,17 @@ void PostgresIndexer::txHashStatus(const HashType &tx_hash, bool is_committed) { tx_hash_status_.status.emplace_back(is_committed ? 
"TRUE" : "FALSE"); } -void PostgresIndexer::committedTxHash(const HashType &committed_tx_hash) { +void PostgresIndexer::committedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const HashType &committed_tx_hash) { txHashStatus(committed_tx_hash, true); } -void PostgresIndexer::rejectedTxHash(const HashType &rejected_tx_hash) { +void PostgresIndexer::rejectedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const HashType &rejected_tx_hash) { txHashStatus(rejected_tx_hash, false); } @@ -43,12 +50,20 @@ void PostgresIndexer::txPositions( iroha::expected::Result PostgresIndexer::flush() { try { + cache_.clear(); assert(tx_hash_status_.hash.size() == tx_hash_status_.status.size()); if (not tx_hash_status_.hash.empty()) { - sql_ << "INSERT INTO tx_status_by_hash" - "(hash, status) VALUES " - "(:hash, :status);", - soci::use(tx_hash_status_.hash), soci::use(tx_hash_status_.status); + cache_ += + "INSERT INTO tx_status_by_hash" + "(hash, status) VALUES "; + for (size_t ix = 0; ix < tx_hash_status_.hash.size(); ++ix) { + cache_ += fmt::format("('{}','{}')", + tx_hash_status_.hash[ix], + tx_hash_status_.status[ix]); + if (ix != tx_hash_status_.hash.size() - 1) + cache_ += ','; + } + cache_ += ";\n"; tx_hash_status_.hash.clear(); tx_hash_status_.status.clear(); @@ -60,13 +75,30 @@ iroha::expected::Result PostgresIndexer::flush() { assert(tx_positions_.account.size() == tx_positions_.height.size()); assert(tx_positions_.account.size() == tx_positions_.index.size()); if (!tx_positions_.account.empty()) { - sql_ << "INSERT INTO tx_positions" - "(creator_id, hash, asset_id, ts, height, index) VALUES " - "(:creator_id, :hash, :asset_id, :ts, :height, :index) ON " - "CONFLICT DO NOTHING;", - soci::use(tx_positions_.account), soci::use(tx_positions_.hash), - soci::use(tx_positions_.asset_id), soci::use(tx_positions_.ts), - soci::use(tx_positions_.height), 
soci::use(tx_positions_.index); + cache_ += + "INSERT INTO tx_positions" + "(creator_id, hash, asset_id, ts, height, index) VALUES "; + for (size_t ix = 0; ix < tx_positions_.account.size(); ++ix) { + if (tx_positions_.asset_id[ix]) { + cache_ += fmt::format("('{}','{}','{}',{},{},{})", + tx_positions_.account[ix], + tx_positions_.hash[ix], + *tx_positions_.asset_id[ix], + tx_positions_.ts[ix], + tx_positions_.height[ix], + tx_positions_.index[ix]); + } else { + cache_ += fmt::format("('{}','{}',NULL,{},{},{})", + tx_positions_.account[ix], + tx_positions_.hash[ix], + tx_positions_.ts[ix], + tx_positions_.height[ix], + tx_positions_.index[ix]); + } + if (ix != tx_positions_.account.size() - 1) + cache_ += ','; + } + cache_ += " ON CONFLICT DO NOTHING;\n"; tx_positions_.account.clear(); tx_positions_.hash.clear(); @@ -76,6 +108,8 @@ iroha::expected::Result PostgresIndexer::flush() { tx_positions_.index.clear(); } + if (!cache_.empty()) + sql_ << cache_; } catch (const std::exception &e) { return e.what(); } diff --git a/irohad/ametsuchi/impl/postgres_indexer.hpp b/irohad/ametsuchi/impl/postgres_indexer.hpp index c38ba01a473..f086206591b 100644 --- a/irohad/ametsuchi/impl/postgres_indexer.hpp +++ b/irohad/ametsuchi/impl/postgres_indexer.hpp @@ -22,11 +22,17 @@ namespace iroha { public: PostgresIndexer(soci::session &sql); - void committedTxHash(const shared_model::interface::types::HashType - &committed_tx_hash) override; + void committedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const shared_model::interface::types::HashType &committed_tx_hash) + override; - void rejectedTxHash(const shared_model::interface::types::HashType - &rejected_tx_hash) override; + void rejectedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const shared_model::interface::types::HashType &rejected_tx_hash) + override; void txPositions( shared_model::interface::types::AccountIdType const 
&account, @@ -60,6 +66,7 @@ namespace iroha { bool is_committed); soci::session &sql_; + std::string cache_; }; } // namespace ametsuchi diff --git a/irohad/ametsuchi/impl/postgres_options.cpp b/irohad/ametsuchi/impl/postgres_options.cpp index 8b6ec8f8305..a50d249f911 100644 --- a/irohad/ametsuchi/impl/postgres_options.cpp +++ b/irohad/ametsuchi/impl/postgres_options.cpp @@ -5,12 +5,12 @@ #include "ametsuchi/impl/postgres_options.hpp" +#include +#include #include #include #include -#include -#include #include "logger/logger.hpp" using namespace iroha::ametsuchi; @@ -55,13 +55,13 @@ PostgresOptions::PostgresOptions(const std::string &pg_opt, std::string default_dbname, logger::LoggerPtr log) : PostgresOptions( - extractField(pg_opt, "host"), - getPort(extractField(pg_opt, "port")), - extractField(pg_opt, "user"), - extractField(pg_opt, "password"), - extractOptionalField(pg_opt, "dbname").value_or(default_dbname), - extractField(pg_opt, "user"), - std::move(log)) {} + extractField(pg_opt, "host"), + getPort(extractField(pg_opt, "port")), + extractField(pg_opt, "user"), + extractField(pg_opt, "password"), + extractOptionalField(pg_opt, "dbname").value_or(default_dbname), + extractOptionalField(pg_opt, "maintenance_dbname").value_or("postgres"), + std::move(log)) {} PostgresOptions::PostgresOptions(const std::string &host, uint16_t port, @@ -80,7 +80,7 @@ PostgresOptions::PostgresOptions(const std::string &host, if (working_dbname_ == maintenance_dbname_) { log->warn( "Working database has the same name with maintenance database: '{}'. 
" - "This may cause failures.", + "This will cause failures.", working_dbname_); } } diff --git a/irohad/ametsuchi/impl/postgres_query_executor.cpp b/irohad/ametsuchi/impl/postgres_query_executor.cpp index 5cc25355b12..2c99f47d5f9 100644 --- a/irohad/ametsuchi/impl/postgres_query_executor.cpp +++ b/irohad/ametsuchi/impl/postgres_query_executor.cpp @@ -15,83 +15,59 @@ using namespace shared_model::interface::permissions; -namespace iroha { - namespace ametsuchi { +namespace iroha::ametsuchi { - PostgresQueryExecutor::PostgresQueryExecutor( - std::unique_ptr sql, - std::shared_ptr - response_factory, - std::shared_ptr specific_query_executor, - logger::LoggerPtr log) - : sql_(std::move(sql)), - specific_query_executor_(std::move(specific_query_executor)), - query_response_factory_{std::move(response_factory)}, - log_(std::move(log)) {} + PostgresQueryExecutor::PostgresQueryExecutor( + std::unique_ptr sql, + std::shared_ptr + response_factory, + std::shared_ptr specific_query_executor, + logger::LoggerPtr log) + : QueryExecutorBase(std::move(response_factory), + std::move(specific_query_executor), + std::move(log)), + sql_(std::move(sql)) {} - template - bool PostgresQueryExecutor::validateSignatures(const Q &query) { - auto keys_range = - query.signatures() | boost::adaptors::transformed([](const auto &s) { - return s.publicKey(); - }); + bool PostgresQueryExecutor::validateSignatures( + const shared_model::interface::Query &query) { + return validateSignaturesImpl(query); + } - if (boost::size(keys_range) != 1) { - return false; - } - std::string keys = *std::begin(keys_range); - // not using bool since it is not supported by SOCI - boost::optional signatories_valid; + bool PostgresQueryExecutor::validateSignatures( + const shared_model::interface::BlocksQuery &query) { + return validateSignaturesImpl(query); + } - auto qry = R"( + template + bool PostgresQueryExecutor::validateSignaturesImpl(const Q &query) { + auto keys_range = + query.signatures() | 
boost::adaptors::transformed([](const auto &s) { + return s.publicKey(); + }); + + if (boost::size(keys_range) != 1) { + return false; + } + std::string keys = *std::begin(keys_range); + // not using bool since it is not supported by SOCI + boost::optional signatories_valid; + + auto qry = R"( SELECT count(public_key) = 1 FROM account_has_signatory WHERE account_id = :account_id AND public_key = lower(:pk) )"; - try { - *sql_ << qry, soci::into(signatories_valid), - soci::use(query.creatorAccountId(), "account_id"), - soci::use(keys, "pk"); - } catch (const std::exception &e) { - log_->error("{}", e.what()); - return false; - } - - return signatories_valid and *signatories_valid; + try { + *sql_ << qry, soci::into(signatories_valid), + soci::use(query.creatorAccountId(), "account_id"), + soci::use(keys, "pk"); + } catch (const std::exception &e) { + log_->error("{}", e.what()); + return false; } - QueryExecutorResult PostgresQueryExecutor::validateAndExecute( - const shared_model::interface::Query &query, - const bool validate_signatories = true) { - if (validate_signatories and not validateSignatures(query)) { - // TODO [IR-1816] Akvinikym 03.12.18: replace magic number 3 - // with a named constant - return query_response_factory_->createErrorQueryResponse( - shared_model::interface::QueryResponseFactory::ErrorQueryType:: - kStatefulFailed, - "query signatories did not pass validation", - 3, - query.hash()); - } - return specific_query_executor_->execute(query); - } - - bool PostgresQueryExecutor::validate( - const shared_model::interface::BlocksQuery &query, - const bool validate_signatories = true) { - if (validate_signatories and not validateSignatures(query)) { - log_->error("query signatories did not pass validation"); - return false; - } - if (not specific_query_executor_->hasAccountRolePermission( - Role::kGetBlocks, query.creatorAccountId())) { - log_->error("query creator does not have enough permissions"); - return false; - } - - return true; - } + 
return signatories_valid and *signatories_valid; + } - } // namespace ametsuchi -} // namespace iroha +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/postgres_query_executor.hpp b/irohad/ametsuchi/impl/postgres_query_executor.hpp index fcd52f8a74b..6e72ff44acb 100644 --- a/irohad/ametsuchi/impl/postgres_query_executor.hpp +++ b/irohad/ametsuchi/impl/postgres_query_executor.hpp @@ -6,7 +6,7 @@ #ifndef IROHA_POSTGRES_QUERY_EXECUTOR_HPP #define IROHA_POSTGRES_QUERY_EXECUTOR_HPP -#include "ametsuchi/query_executor.hpp" +#include "ametsuchi/impl/query_executor_base.hpp" #include #include "logger/logger_fwd.hpp" @@ -17,39 +17,30 @@ namespace shared_model { } // namespace interface } // namespace shared_model -namespace iroha { - namespace ametsuchi { +namespace iroha::ametsuchi { - class SpecificQueryExecutor; + class SpecificQueryExecutor; - class PostgresQueryExecutor : public QueryExecutor { - public: - PostgresQueryExecutor( - std::unique_ptr sql, - std::shared_ptr - response_factory, - std::shared_ptr specific_query_executor, - logger::LoggerPtr log); + class PostgresQueryExecutor : public QueryExecutorBase { + public: + PostgresQueryExecutor( + std::unique_ptr sql, + std::shared_ptr + response_factory, + std::shared_ptr specific_query_executor, + logger::LoggerPtr log); - QueryExecutorResult validateAndExecute( - const shared_model::interface::Query &query, - const bool validate_signatories) override; + bool validateSignatures( + const shared_model::interface::Query &query) override; + bool validateSignatures( + const shared_model::interface::BlocksQuery &query) override; - bool validate(const shared_model::interface::BlocksQuery &query, - const bool validate_signatories) override; + private: + template + bool validateSignaturesImpl(const Q &query); + std::unique_ptr sql_; + }; - private: - template - bool validateSignatures(const Q &query); - - std::unique_ptr sql_; - std::shared_ptr specific_query_executor_; - std::shared_ptr - 
query_response_factory_; - logger::LoggerPtr log_; - }; - - } // namespace ametsuchi -} // namespace iroha +} // namespace iroha::ametsuchi #endif // IROHA_POSTGRES_QUERY_EXECUTOR_HPP diff --git a/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp b/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp index 25b13fc198f..4b74f4b0579 100644 --- a/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp +++ b/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp @@ -5,15 +5,15 @@ #include "ametsuchi/impl/postgres_specific_query_executor.hpp" -#include -#include - #include #include #include #include #include #include +#include +#include + #include "ametsuchi/block_storage.hpp" #include "ametsuchi/impl/executor_common.hpp" #include "ametsuchi/impl/soci_std_optional.hpp" @@ -453,6 +453,10 @@ namespace iroha { FROM tx_positions WHERE {2} -- related_txs + {5} -- time interval begin + {6} -- time interval end + {7} -- height begin + {8} -- height end {1} -- ordering ), total_size AS (SELECT COUNT(*) FROM my_txs) {3} @@ -471,7 +475,6 @@ namespace iroha { 1, query_hash); } - auto query = fmt::format( base, hasQueryPermissionTarget(creator_id, q.accountId(), perms...), @@ -480,7 +483,11 @@ namespace iroha { (first_hash ? R"(, base_row AS(SELECT row FROM my_txs WHERE hash = lower(:hash) LIMIT 1))" : ""), - (first_hash ? R"(JOIN base_row ON my_txs.row >= base_row.row)" : "")); + (first_hash ? 
R"(JOIN base_row ON my_txs.row >= base_row.row)" : ""), + "AND (:first_tx_time::text IS NULL OR :first_tx_time<=ts)", + "AND (:last_tx_time::text IS NULL OR :last_tx_time>=ts )", + "AND (:first_tx_height::text IS NULL OR :first_tx_height<=height)", + "AND (:last_tx_height::text IS NULL OR :last_tx_height>=height )"); return executeQuery( applier(query), @@ -704,12 +711,12 @@ namespace iroha { return query_response_factory_->createSignatoriesResponse( pubkeys, query_hash); }, + notEnoughPermissionsResponse(perm_converter_, Role::kGetMySignatories, Role::kGetAllSignatories, Role::kGetDomainSignatories)); } - QueryExecutorResult PostgresSpecificQueryExecutor::operator()( const shared_model::interface::GetAccountTransactions &q, const shared_model::interface::types::AccountIdType &creator_id, @@ -724,17 +731,30 @@ namespace iroha { // retrieve one extra transaction to populate next_hash auto query_size = pagination_info.pageSize() + 1u; + auto first_tx_time = pagination_info.firstTxTime(); + auto last_tx_time = pagination_info.lastTxTime(); + auto first_tx_height = pagination_info.firstTxHeight(); + auto last_tx_height = pagination_info.lastTxHeight(); + soci::indicator ind = soci::i_null; auto apply_query = [&](const auto &query) { return [&] { if (first_hash) { return (sql_.prepare << query, - soci::use(q.accountId()), - soci::use(first_hash->hex()), - soci::use(query_size)); + soci::use(q.accountId(), "account_id"), + soci::use(first_hash->hex(), "hash"), + soci::use(query_size, "page_size"), + soci::use(first_tx_time, ind, "first_tx_time"), + soci::use(last_tx_time, ind, "last_tx_time"), + soci::use(first_tx_height, ind, "first_tx_height"), + soci::use(last_tx_height, ind, "last_tx_height")); } else { return (sql_.prepare << query, - soci::use(q.accountId()), - soci::use(query_size)); + soci::use(q.accountId(), "account_id"), + soci::use(query_size, "page_size"), + soci::use(first_tx_time, ind, "first_tx_time"), + soci::use(last_tx_time, ind, "last_tx_time"), + 
soci::use(first_tx_height, ind, "first_tx_height"), + soci::use(last_tx_height, ind, "last_tx_height")); } }; }; @@ -854,20 +874,32 @@ namespace iroha { auto first_hash = pagination_info.firstTxHash(); // retrieve one extra transaction to populate next_hash auto query_size = pagination_info.pageSize() + 1u; - + auto first_tx_time = pagination_info.firstTxTime(); + auto last_tx_time = pagination_info.lastTxTime(); + auto first_tx_height = pagination_info.firstTxHeight(); + auto last_tx_height = pagination_info.lastTxHeight(); + soci::indicator ind = soci::i_null; auto apply_query = [&](const auto &query) { return [&] { if (first_hash) { return (sql_.prepare << query, - soci::use(q.accountId()), - soci::use(q.assetId()), - soci::use(first_hash->hex()), - soci::use(query_size)); + soci::use(q.accountId(), "account_id"), + soci::use(q.assetId(), "asset_id"), + soci::use(first_hash->hex(), "hash"), + soci::use(query_size, "page_size"), + soci::use(first_tx_time, ind, "first_tx_time"), + soci::use(last_tx_time, ind, "last_tx_time"), + soci::use(first_tx_height, ind, "first_tx_height"), + soci::use(last_tx_height, ind, "last_tx_height")); } else { return (sql_.prepare << query, - soci::use(q.accountId()), - soci::use(q.assetId()), - soci::use(query_size)); + soci::use(q.accountId(), "account_id"), + soci::use(q.assetId(), "asset_id"), + soci::use(query_size, "page_size"), + soci::use(first_tx_time, ind, "first_tx_time"), + soci::use(last_tx_time, ind, "last_tx_time"), + soci::use(first_tx_height, ind, "first_tx_height"), + soci::use(last_tx_height, ind, "last_tx_height")); } }; }; diff --git a/irohad/ametsuchi/impl/postgres_temporary_wsv_impl.cpp b/irohad/ametsuchi/impl/postgres_temporary_wsv_impl.cpp new file mode 100644 index 00000000000..b59ce7971c7 --- /dev/null +++ b/irohad/ametsuchi/impl/postgres_temporary_wsv_impl.cpp @@ -0,0 +1,77 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/postgres_temporary_wsv_impl.hpp" + +#include +#include +#include +#include "ametsuchi/impl/postgres_command_executor.hpp" +#include "ametsuchi/impl/postgres_db_transaction.hpp" +#include "ametsuchi/tx_executor.hpp" +#include "interfaces/commands/command.hpp" +#include "interfaces/permission_to_string.hpp" +#include "interfaces/transaction.hpp" +#include "logger/logger.hpp" +#include "logger/logger_manager.hpp" + +namespace iroha::ametsuchi { + + PostgresTemporaryWsvImpl::PostgresTemporaryWsvImpl( + std::shared_ptr command_executor, + logger::LoggerManagerTreePtr log_manager) + : TemporaryWsvImpl(command_executor, log_manager), + sql_(command_executor->getSession()) {} + + expected::Result + PostgresTemporaryWsvImpl::validateSignatures( + const shared_model::interface::Transaction &transaction) { + auto keys_range = transaction.signatures() + | boost::adaptors::transformed( + [](const auto &s) { return s.publicKey(); }); + auto keys = boost::algorithm::join(keys_range, "'), ('"); + // not using bool since it is not supported by SOCI + boost::optional signatories_valid; + + boost::format query(R"(SELECT sum(count) = :signatures_count + AND sum(quorum) <= :signatures_count + FROM + (SELECT count(public_key) + FROM ( VALUES ('%s') ) AS CTE1(public_key) + WHERE lower(public_key) IN + (SELECT public_key + FROM account_has_signatory + WHERE account_id = :account_id ) ) AS CTE2(count), + (SELECT quorum + FROM account + WHERE account_id = :account_id) AS CTE3(quorum))"); + + try { + auto keys_range_size = boost::size(keys_range); + sql_ << (query % keys).str(), soci::into(signatories_valid), + soci::use(keys_range_size, "signatures_count"), + soci::use(transaction.creatorAccountId(), "account_id"); + } catch (const std::exception &e) { + auto error_str = "Transaction " + transaction.toString() + + " failed signatures validation with db error: " + e.what(); + // TODO [IR-1816] Akvinikym 29.10.18: 
substitute error code magic number + // with named constant + return expected::makeError(validation::CommandError{ + "signatures validation", 1, error_str, false}); + } + + if (signatories_valid and *signatories_valid) { + return {}; + } else { + auto error_str = "Transaction " + transaction.toString() + + " failed signatures validation"; + // TODO [IR-1816] Akvinikym 29.10.18: substitute error code magic number + // with named constant + return expected::makeError(validation::CommandError{ + "signatures validation", 2, error_str, false}); + } + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/postgres_temporary_wsv_impl.hpp b/irohad/ametsuchi/impl/postgres_temporary_wsv_impl.hpp new file mode 100644 index 00000000000..be381607ad0 --- /dev/null +++ b/irohad/ametsuchi/impl/postgres_temporary_wsv_impl.hpp @@ -0,0 +1,45 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_POSTGRES_TEMPORARY_WSV_IMPL_HPP +#define IROHA_POSTGRES_TEMPORARY_WSV_IMPL_HPP + +#include "ametsuchi/impl/temporary_wsv_impl.hpp" + +#include + +namespace shared_model { + namespace interface { + class PermissionToString; + } +} // namespace shared_model + +namespace iroha::ametsuchi { + + class PostgresCommandExecutor; + class TransactionExecutor; + + class PostgresTemporaryWsvImpl final : public TemporaryWsvImpl { + public: + PostgresTemporaryWsvImpl( + std::shared_ptr command_executor, + logger::LoggerManagerTreePtr log_manager); + + ~PostgresTemporaryWsvImpl() = default; + + soci::session &getSession() { + return sql_; + } + + protected: + expected::Result validateSignatures( + const shared_model::interface::Transaction &transaction); + + soci::session &sql_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_POSTGRES_TEMPORARY_WSV_IMPL_HPP diff --git a/irohad/ametsuchi/impl/postgres_wsv_command.cpp b/irohad/ametsuchi/impl/postgres_wsv_command.cpp index 44f4f45ac9d..7d02a0c6041 --- 
a/irohad/ametsuchi/impl/postgres_wsv_command.cpp +++ b/irohad/ametsuchi/impl/postgres_wsv_command.cpp @@ -423,5 +423,6 @@ namespace iroha { return fmt::format("Failed to set top_block_info: {}.", e.what()); } } + } // namespace ametsuchi } // namespace iroha diff --git a/irohad/ametsuchi/impl/postgres_wsv_query.cpp b/irohad/ametsuchi/impl/postgres_wsv_query.cpp index fcaf31821ea..b25df7b32fc 100644 --- a/irohad/ametsuchi/impl/postgres_wsv_query.cpp +++ b/irohad/ametsuchi/impl/postgres_wsv_query.cpp @@ -6,6 +6,7 @@ #include "ametsuchi/impl/postgres_wsv_query.hpp" #include + #include "ametsuchi/impl/soci_std_optional.hpp" #include "ametsuchi/impl/soci_utils.hpp" #include "ametsuchi/ledger_state.hpp" @@ -79,6 +80,35 @@ namespace iroha { return getPeersFromSociRowSet(result); } + iroha::expected::Result PostgresWsvQuery::count( + std::string_view table, std::string_view column /* ="*" */) try { + int count; + sql_ << "SELECT count(" << column << ") FROM " << table, + soci::into(count); + return count; + } catch (const std::exception &e) { + auto msg = fmt::format("Failed to count {}, query: {}", table, e.what()); + log_->error(msg); + return iroha::expected::makeError(msg); + } + + iroha::expected::Result + PostgresWsvQuery::countPeers() { + return count("peer"); + } + + iroha::expected::Result + PostgresWsvQuery::countDomains() { + return count("domain"); + } + + iroha::expected::Result + PostgresWsvQuery::countTransactions() { + return count("tx_positions", "DISTINCT hash"); + // OR return count("tx_status_from_hash", "*", "WHERE status=true"); + // //select count(*) from tx_status_by_hash where status=true + } + boost::optional> PostgresWsvQuery::getPeerByPublicKey( shared_model::interface::types::PublicKeyHexStringView public_key) { diff --git a/irohad/ametsuchi/impl/postgres_wsv_query.hpp b/irohad/ametsuchi/impl/postgres_wsv_query.hpp index 746541b716e..25ad27bf612 100644 --- a/irohad/ametsuchi/impl/postgres_wsv_query.hpp +++ 
b/irohad/ametsuchi/impl/postgres_wsv_query.hpp @@ -6,9 +6,9 @@ #ifndef IROHA_POSTGRES_WSV_QUERY_HPP #define IROHA_POSTGRES_WSV_QUERY_HPP -#include "ametsuchi/wsv_query.hpp" - #include + +#include "ametsuchi/wsv_query.hpp" #include "logger/logger_fwd.hpp" namespace iroha { @@ -28,6 +28,10 @@ namespace iroha { std::vector>> getPeers() override; + iroha::expected::Result countPeers() override; + iroha::expected::Result countDomains() override; + iroha::expected::Result countTransactions() override; + boost::optional> getPeerByPublicKey(shared_model::interface::types::PublicKeyHexStringView public_key) override; @@ -43,6 +47,9 @@ namespace iroha { template auto execute(F &&f) -> boost::optional>; + iroha::expected::Result count( + std::string_view, std::string_view column = "*"); + // TODO andrei 24.09.2018: IR-1718 Consistent soci::session fields in // storage classes std::unique_ptr psql_; diff --git a/irohad/ametsuchi/impl/query_executor_base.cpp b/irohad/ametsuchi/impl/query_executor_base.cpp new file mode 100644 index 00000000000..c12b0bec30c --- /dev/null +++ b/irohad/ametsuchi/impl/query_executor_base.cpp @@ -0,0 +1,61 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/query_executor_base.hpp" + +#include +#include +#include "ametsuchi/specific_query_executor.hpp" +#include "interfaces/iroha_internal/query_response_factory.hpp" +#include "interfaces/queries/blocks_query.hpp" +#include "interfaces/queries/query.hpp" +#include "logger/logger.hpp" + +using namespace shared_model::interface::permissions; + +namespace iroha::ametsuchi { + + QueryExecutorBase::QueryExecutorBase( + std::shared_ptr + response_factory, + std::shared_ptr specific_query_executor, + logger::LoggerPtr log) + : specific_query_executor_(std::move(specific_query_executor)), + query_response_factory_{std::move(response_factory)}, + log_(std::move(log)) {} + + QueryExecutorResult QueryExecutorBase::validateAndExecute( + const shared_model::interface::Query &query, + const bool validate_signatories = true) { + if (validate_signatories and not validateSignatures(query)) { + // TODO [IR-1816] Akvinikym 03.12.18: replace magic number 3 + // with a named constant + return query_response_factory_->createErrorQueryResponse( + shared_model::interface::QueryResponseFactory::ErrorQueryType:: + kStatefulFailed, + "query signatories did not pass validation", + 3, + query.hash()); + } + return specific_query_executor_->execute(query); + } + + bool QueryExecutorBase::validate( + const shared_model::interface::BlocksQuery &query, + const bool validate_signatories = true) { + if (validate_signatories and not validateSignatures(query)) { + log_->error("query signatories did not pass validation"); + return false; + } + if (not specific_query_executor_->hasAccountRolePermission( + Role::kGetBlocks, query.creatorAccountId())) { + log_->error("query creator does not have enough permissions"); + return false; + } + + return true; + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/query_executor_base.hpp b/irohad/ametsuchi/impl/query_executor_base.hpp new file mode 100644 index 
00000000000..0f588d8a760 --- /dev/null +++ b/irohad/ametsuchi/impl/query_executor_base.hpp @@ -0,0 +1,52 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_QUERY_EXECUTOR_BASE_HPP +#define IROHA_QUERY_EXECUTOR_BASE_HPP + +#include "ametsuchi/query_executor.hpp" + +#include "logger/logger_fwd.hpp" + +namespace shared_model { + namespace interface { + class QueryResponseFactory; + } // namespace interface +} // namespace shared_model + +namespace iroha::ametsuchi { + + class SpecificQueryExecutor; + + class QueryExecutorBase : public QueryExecutor { + public: + QueryExecutorBase( + std::shared_ptr + response_factory, + std::shared_ptr specific_query_executor, + logger::LoggerPtr log); + + QueryExecutorResult validateAndExecute( + const shared_model::interface::Query &query, + const bool validate_signatories) override; + + bool validate(const shared_model::interface::BlocksQuery &query, + const bool validate_signatories) override; + + virtual bool validateSignatures( + const shared_model::interface::Query &query) = 0; + virtual bool validateSignatures( + const shared_model::interface::BlocksQuery &query) = 0; + + protected: + std::shared_ptr specific_query_executor_; + std::shared_ptr + query_response_factory_; + logger::LoggerPtr log_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_POSTGRES_QUERY_EXECUTOR_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_block_query.cpp b/irohad/ametsuchi/impl/rocksdb_block_query.cpp new file mode 100644 index 00000000000..7ffbc04b9fb --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_block_query.cpp @@ -0,0 +1,42 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_block_query.hpp" + +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "common/cloneable.hpp" +#include "logger/logger.hpp" + +namespace iroha::ametsuchi { + + RocksDbBlockQuery::RocksDbBlockQuery( + std::shared_ptr db_context, + BlockStorage &block_storage, + logger::LoggerPtr log) + : BlockQueryBase(block_storage, std::move(log)), + db_context_(std::move(db_context)) {} + + std::optional RocksDbBlockQuery::getTxStatus( + const shared_model::crypto::Hash &hash) { + int res = -1; + RocksDbCommon common(db_context_); + + if (auto status = + forTransactionStatus( + common, hash.hex()); + expected::hasError(status)) { + log_->error("Failed to execute query: {}, code: {}", + status.assumeError().description, + status.assumeError().code); + return std::nullopt; + } else if (status.assumeValue()) { + auto const &[tx_status] = staticSplitId<1ull>(*status.assumeValue(), "#"); + res = tx_status == "TRUE" ? 1 : 0; + } + + return res; + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/rocksdb_block_query.hpp b/irohad/ametsuchi/impl/rocksdb_block_query.hpp new file mode 100644 index 00000000000..53e2d3b2d58 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_block_query.hpp @@ -0,0 +1,33 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_BLOCK_QUERY_HPP +#define IROHA_ROCKSDB_BLOCK_QUERY_HPP + +#include "ametsuchi/impl/block_query_base.hpp" + +namespace iroha::ametsuchi { + + struct RocksDBContext; + + /** + * Class which implements BlockQuery with a RocksDB backend. 
+ */ + class RocksDbBlockQuery : public BlockQueryBase { + public: + RocksDbBlockQuery(std::shared_ptr db_context, + BlockStorage &block_storage, + logger::LoggerPtr log); + + std::optional getTxStatus( + const shared_model::crypto::Hash &hash) override; + + private: + std::shared_ptr db_context_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_POSTGRES_BLOCK_QUERY_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_block_storage.cpp b/irohad/ametsuchi/impl/rocksdb_block_storage.cpp new file mode 100644 index 00000000000..0e4f6e9ce0d --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_block_storage.cpp @@ -0,0 +1,135 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_block_storage.hpp" + +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "backend/protobuf/block.hpp" +#include "common/byteutils.hpp" +#include "logger/logger.hpp" + +using namespace iroha::ametsuchi; + +#define CHECK_OPERATION(command, ...) \ + if (auto result = (__VA_ARGS__); expected::hasError(result)) { \ + log_->error("Error while block {} " command ". Code: {}. Description: {}", \ + block->height(), \ + result.assumeError().code, \ + result.assumeError().description); \ + return false; \ + } + +namespace { + inline iroha::expected::Result incrementTotalBlocksCount( + iroha::ametsuchi::RocksDbCommon &common) { + RDB_TRY_GET_VALUE( + opt_count, + forBlocksTotalCount(common)); + + common.encode(opt_count ? 
*opt_count + 1ull : 1ull); + RDB_ERROR_CHECK( + forBlocksTotalCount(common)); + + return {}; + } +} // namespace + +RocksDbBlockStorage::RocksDbBlockStorage( + std::shared_ptr db_context, + std::shared_ptr json_converter, + logger::LoggerPtr log) + : db_context_(std::move(db_context)), + json_converter_(std::move(json_converter)), + log_(std::move(log)) {} + +bool RocksDbBlockStorage::insert( + std::shared_ptr block) { + return json_converter_->serialize(*block).match( + [&](const auto &block_json) { + RocksDbCommon common(db_context_); + CHECK_OPERATION("insertion", + forBlock( + common, block->height())); + + common.valueBuffer() = block_json.value; + CHECK_OPERATION("storing", + forBlock(common, block->height())); + + CHECK_OPERATION("total count storing", + incrementTotalBlocksCount(common)); + return true; + }, + [this](const auto &error) { + log_->warn("Error while block serialization: {}", error.error); + return false; + }); +} + +boost::optional> +RocksDbBlockStorage::fetch( + shared_model::interface::types::HeightType height) const { + RocksDbCommon common(db_context_); + if (auto result = + forBlock(common, height); + expected::hasError(result)) { + log_->error("Error while block {} reading. Code: {}. 
Description: {}", + height, + result.assumeError().code, + result.assumeError().description); + return boost::none; + } + + return json_converter_->deserialize(common.valueBuffer()) + .match( + [&](auto &&block) { + return boost::make_optional< + std::unique_ptr>( + std::move(block.value)); + }, + [&](const auto &error) + -> boost::optional< + std::unique_ptr> { + log_->warn("Error while block deserialization: {}", error.error); + return boost::none; + }); +} + +size_t RocksDbBlockStorage::size() const { + RocksDbCommon common(db_context_); + if (auto result = + forBlocksTotalCount(common); + expected::hasValue(result)) + return *result.assumeValue(); + return 0ull; +} + +void RocksDbBlockStorage::reload() {} + +void RocksDbBlockStorage::clear() { + RocksDbCommon common(db_context_); + + if (auto status = common.filterDelete(fmtstrings::kPathWsv); !status.ok()) + log_->error("Unable to delete WSV. Description: {}", status.ToString()); + + if (auto status = common.filterDelete(fmtstrings::kPathStore); !status.ok()) + log_->error("Unable to delete STORE. Description: {}", status.ToString()); +} + +iroha::expected::Result RocksDbBlockStorage::forEach( + iroha::ametsuchi::BlockStorage::FunctionType function) const { + uint64_t const blocks_count = size(); + for (uint64_t ix = 1; ix <= blocks_count; ++ix) { + auto maybe_block = fetch(ix); + if (maybe_block) { + auto maybe_error = function(std::move(maybe_block).value()); + if (iroha::expected::hasError(maybe_error)) { + return maybe_error.assumeError(); + } + } else { + return fmt::format("Failed to fetch block {}", ix); + } + } + return {}; +} diff --git a/irohad/ametsuchi/impl/rocksdb_block_storage.hpp b/irohad/ametsuchi/impl/rocksdb_block_storage.hpp new file mode 100644 index 00000000000..d83d0c34dfe --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_block_storage.hpp @@ -0,0 +1,49 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_BLOCK_STORAGE_HPP +#define IROHA_ROCKSDB_BLOCK_STORAGE_HPP + +#include "ametsuchi/block_storage.hpp" + +#include "interfaces/iroha_internal/block_json_converter.hpp" +#include "logger/logger_fwd.hpp" + +namespace iroha::ametsuchi { + struct RocksDBContext; + + class RocksDbBlockStorage : public BlockStorage { + public: + RocksDbBlockStorage( + std::shared_ptr db_context, + std::shared_ptr + json_converter, + logger::LoggerPtr log); + + bool insert( + std::shared_ptr block) override; + + boost::optional> fetch( + shared_model::interface::types::HeightType height) const override; + + size_t size() const override; + + void reload() override; + + void clear() override; + + expected::Result forEach( + FunctionType function) const override; + + private: + std::shared_ptr db_context_; + std::shared_ptr + json_converter_; + logger::LoggerPtr log_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_BLOCK_STORAGE_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_block_storage_factory.cpp b/irohad/ametsuchi/impl/rocksdb_block_storage_factory.cpp new file mode 100644 index 00000000000..e68381cf895 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_block_storage_factory.cpp @@ -0,0 +1,28 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_block_storage_factory.hpp" + +#include "ametsuchi/impl/rocksdb_block_storage.hpp" +#include "ametsuchi/impl/rocksdb_common.hpp" + +using namespace iroha::ametsuchi; + +RocksDbBlockStorageFactory::RocksDbBlockStorageFactory( + std::shared_ptr db_context, + std::shared_ptr + json_block_converter, + logger::LoggerManagerTreePtr log_manager) + : db_context_(std::move(db_context)), + json_block_converter_(std::move(json_block_converter)), + log_manager_(std::move(log_manager)) {} + +iroha::expected::Result, std::string> +RocksDbBlockStorageFactory::create() { + return std::make_unique( + db_context_, + json_block_converter_, + log_manager_->getChild("RocksDbBlockFactory")->getLogger()); +} diff --git a/irohad/ametsuchi/impl/rocksdb_block_storage_factory.hpp b/irohad/ametsuchi/impl/rocksdb_block_storage_factory.hpp new file mode 100644 index 00000000000..3a6c6b51c5b --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_block_storage_factory.hpp @@ -0,0 +1,37 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_BLOCK_STORAGE_FACTORY_HPP +#define IROHA_ROCKSDB_BLOCK_STORAGE_FACTORY_HPP + +#include "ametsuchi/block_storage_factory.hpp" + +#include "interfaces/iroha_internal/block_json_converter.hpp" +#include "logger/logger_manager.hpp" + +namespace iroha::ametsuchi { + struct RocksDBContext; + + class RocksDbBlockStorageFactory : public BlockStorageFactory { + public: + RocksDbBlockStorageFactory( + std::shared_ptr db_context, + std::shared_ptr + json_block_converter, + logger::LoggerManagerTreePtr log_manager); + + iroha::expected::Result, std::string> create() + override; + + private: + std::shared_ptr db_context_; + std::shared_ptr + json_block_converter_; + logger::LoggerManagerTreePtr log_manager_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_BLOCK_STORAGE_FACTORY_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp new file mode 100644 index 00000000000..4cdf6f2b2fc --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp @@ -0,0 +1,1113 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_command_executor.hpp" + +#include +#include +#include +#include +#include "ametsuchi/impl/executor_common.hpp" +#include "ametsuchi/setting_query.hpp" +#include "ametsuchi/vm_caller.hpp" +#include "common/to_lower.hpp" +#include "interfaces/commands/add_asset_quantity.hpp" +#include "interfaces/commands/add_peer.hpp" +#include "interfaces/commands/add_signatory.hpp" +#include "interfaces/commands/append_role.hpp" +#include "interfaces/commands/call_engine.hpp" +#include "interfaces/commands/command.hpp" +#include "interfaces/commands/compare_and_set_account_detail.hpp" +#include "interfaces/commands/create_account.hpp" +#include "interfaces/commands/create_asset.hpp" +#include "interfaces/commands/create_domain.hpp" +#include "interfaces/commands/create_role.hpp" +#include "interfaces/commands/detach_role.hpp" +#include "interfaces/commands/grant_permission.hpp" +#include "interfaces/commands/remove_peer.hpp" +#include "interfaces/commands/remove_signatory.hpp" +#include "interfaces/commands/revoke_permission.hpp" +#include "interfaces/commands/set_account_detail.hpp" +#include "interfaces/commands/set_quorum.hpp" +#include "interfaces/commands/set_setting_value.hpp" +#include "interfaces/commands/subtract_asset_quantity.hpp" +#include "interfaces/commands/transfer_asset.hpp" + +using namespace iroha; +using namespace iroha::ametsuchi; + +using shared_model::interface::permissions::Grantable; +using shared_model::interface::permissions::Role; + +using shared_model::interface::GrantablePermissionSet; +using shared_model::interface::RolePermissionSet; + +RocksDbCommandExecutor::RocksDbCommandExecutor( + std::shared_ptr db_context, + std::shared_ptr perm_converter, + std::optional> vm_caller) + : db_context_(std::move(db_context)), + perm_converter_{std::move(perm_converter)}, + vm_caller_{vm_caller}, + db_transaction_(db_context_) { + assert(db_context_); +} + 
+RocksDbCommandExecutor::~RocksDbCommandExecutor() = default; + +void RocksDbCommandExecutor::skipChanges() { + RocksDbCommon common(db_context_); + common.skip(); +} + +DatabaseTransaction &RocksDbCommandExecutor::dbSession() { + return db_transaction_; +} + +std::shared_ptr RocksDbCommandExecutor::getSession() { + return db_context_; +} + +CommandResult RocksDbCommandExecutor::execute( + const shared_model::interface::Command &cmd, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation) { + return boost::apply_visitor( + [this, &creator_account_id, &tx_hash, cmd_index, do_validation]( + const auto &command) -> CommandResult { + // TODO(iceseer): remove try-catch when commands will be implemented + try { + RolePermissionSet creator_permissions; + RocksDbCommon common(db_context_); + if (do_validation) { + auto const &[account_name, domain_id] = + staticSplitId<2ull>(creator_account_id); + + // get account permissions + if (auto result = + accountPermissions(common, account_name, domain_id); + expected::hasError(result)) + return expected::makeError( + CommandError{command.toString(), + result.assumeError().code, + result.assumeError().description}); + else + creator_permissions = result.assumeValue(); + } + + if (auto result = (*this)(common, + command, + creator_account_id, + tx_hash, + cmd_index, + do_validation, + creator_permissions); + expected::hasError(result)) + return expected::makeError( + CommandError{command.toString(), + result.assumeError().code, + fmt::format("Command: {}. 
{}", + command.toString(), + result.assumeError().description)}); + + return {}; + } catch (std::exception &e) { + return expected::makeError(CommandError{ + command.toString(), ErrorCodes::kException, e.what()}); + } + }, + cmd.get()); +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::AddAssetQuantity &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[asset_name, domain_id] = staticSplitId<2>(command.assetId()); + auto const &amount = command.amount(); + + if (do_validation) + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + creator_permissions, + Role::kAddAssetQty, + Role::kAddDomainAssetQty)); + + // check if asset exists and construct amount by precision + RDB_TRY_GET_VALUE(asset_amount, + forAsset( + common, asset_name, domain_id)); + shared_model::interface::Amount result(*asset_amount); + + RDB_TRY_GET_VALUE( + account_asset_sz, + forAccountAssetSize( + common, creator_account_name, creator_domain_id)); + uint64_t account_asset_size(account_asset_sz ? 
*account_asset_sz : 0ull); + + { // get account asset balance + RDB_TRY_GET_VALUE(balance, + forAccountAsset( + common, + creator_account_name, + creator_domain_id, + command.assetId())); + if (!balance) + ++account_asset_size; + else + result = std::move(*balance); + } + + result += amount; + common.valueBuffer().assign(result.toStringRepr()); + if (common.valueBuffer()[0] == 'N') + return makeError(ErrorCodes::kInvalidAssetAmount, + "Invalid asset {} amount {}", + command.assetId(), + result.toString()); + + RDB_ERROR_CHECK(forAccountAsset( + common, creator_account_name, creator_domain_id, command.assetId())); + + common.encode(account_asset_size); + RDB_ERROR_CHECK(forAccountAssetSize( + common, creator_account_name, creator_domain_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::AddPeer &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &peer = command.peer(); + if (do_validation) + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kAddPeer)); + + std::string pk; + toLowerAppend(peer.pubkey(), pk); + + RDB_ERROR_CHECK(forPeerAddress( + common, pk)); + + RDB_TRY_GET_VALUE( + opt_peers_count, + forPeersCount(common)); + + common.encode((opt_peers_count ? 
*opt_peers_count : 0ull) + 1ull); + RDB_ERROR_CHECK(forPeersCount(common)); + + /// Store address + common.valueBuffer().assign(peer.address()); + RDB_ERROR_CHECK(forPeerAddress(common, pk)); + + /// Store TLS if present + if (peer.tlsCertificate().has_value()) { + common.valueBuffer().assign(peer.tlsCertificate().value()); + RDB_ERROR_CHECK(forPeerTLS(common, pk)); + } + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::AddSignatory &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + + if (do_validation) { + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + opt_permissions, + forGrantablePermissions( + common, + creator_account_name, + creator_domain_id, + command.accountId())); + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + if (creator_account_id == command.accountId()) { + RDB_ERROR_CHECK( + checkPermissions(creator_permissions, Role::kAddSignatory)); + } else { + RDB_ERROR_CHECK(checkGrantablePermissions(creator_permissions, + granted_account_permissions, + Grantable::kAddMySignatory)); + } + } + + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + + std::string pk; + toLowerAppend(command.pubkey(), pk); + + if (auto result = forSignatory( + common, account_name, domain_id, pk); + expected::hasError(result)) + return makeError(ErrorCodes::kSignatoryMustNotExist, + "Signatory must not exist."); + + common.valueBuffer().clear(); + RDB_ERROR_CHECK( + forSignatory(common, account_name, 
domain_id, pk)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::AppendRole &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + auto const &role_name = command.roleName(); + + if (do_validation) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kAppendRole)); + + RDB_TRY_GET_VALUE( + opt_permissions, + forRole(common, role_name)); + if (!opt_permissions->isSubsetOf(creator_permissions)) + return makeError(ErrorCodes::kNoPermissions, + "Insufficient permissions"); + } + + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + + // Account must not have role, else return error. 
+ RDB_ERROR_CHECK(forAccountRole( + common, account_name, domain_id, role_name)); + + common.valueBuffer() = ""; + RDB_ERROR_CHECK(forAccountRole( + common, account_name, domain_id, role_name)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::CallEngine &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool /*do_validation*/, + shared_model::interface::RolePermissionSet const &creator_permissions) { + return makeError(ErrorCodes::kNoImplementation, "Not implemented"); +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::CompareAndSetAccountDetail &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + opt_permissions, + forGrantablePermissions( + common, + creator_account_name, + creator_domain_id, + command.accountId())); + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + if (do_validation) + RDB_ERROR_CHECK(checkPermissions(creator_permissions, + granted_account_permissions, + Role::kGetMyAccDetail, + Grantable::kSetMyAccountDetail)); + + std::string_view const creator_id = !creator_account_id.empty() + ? 
creator_account_id + : std::string_view{"genesis"}; + + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + + RDB_TRY_GET_VALUE( + opt_detail, + forAccountDetail( + common, account_name, domain_id, creator_id, command.key())); + + bool const eq = (command.oldValue() && opt_detail) + ? *opt_detail == *command.oldValue() + : false; + bool const same = + command.checkEmpty() ? !command.oldValue() && !opt_detail : !opt_detail; + + if (eq || same) { + RDB_TRY_GET_VALUE( + opt_detail, + forAccountDetail( + common, + account_name, + domain_id, + !creator_account_id.empty() ? creator_account_id : "genesis", + command.key())); + + common.valueBuffer().assign(command.value()); + RDB_ERROR_CHECK(forAccountDetail( + common, account_name, domain_id, creator_id, command.key())); + + if (!opt_detail) { + RDB_TRY_GET_VALUE( + opt_acc_details_count, + forAccountDetailsCount( + common, account_name, domain_id)); + const uint64_t count = + opt_acc_details_count ? *opt_acc_details_count : 0ull; + + common.encode(count + 1ull); + RDB_ERROR_CHECK(forAccountDetailsCount( + common, account_name, domain_id)); + } + + return {}; + } + + return makeError(ErrorCodes::kIncorrectOldValue, "Old value incorrect"); +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::CreateAccount &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &account_name = command.accountName(); + auto const &domain_id = command.domainId(); + std::string pubkey; + + if (do_validation) + RDB_ERROR_CHECK( + checkPermissions(creator_permissions, Role::kCreateAccount)); + + // check if domain exists + RDB_TRY_GET_VALUE( + opt_default_role, + forDomain(common, domain_id)); + std::string 
default_role(*opt_default_role); + + RDB_TRY_GET_VALUE( + opt_permissions, + forRole(common, default_role)); + + if (do_validation && !opt_permissions->isSubsetOf(creator_permissions)) + return makeError(ErrorCodes::kNoPermissions, + "Insufficient permissions"); + + common.valueBuffer() = ""; + RDB_ERROR_CHECK(forAccountRole( + common, account_name, domain_id, default_role)); + + // check if account already exists + if (do_validation) + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + + common.valueBuffer() = ""; + RDB_ERROR_CHECK(forSignatory( + common, + account_name, + domain_id, + toLowerAppend(command.pubkey(), pubkey))); + + common.encode(1); + RDB_ERROR_CHECK( + forQuorum(common, account_name, domain_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::CreateAsset &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &domain_id = command.domainId(); + auto const &asset_name = command.assetName(); + + if (do_validation) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kCreateAsset)); + + // check if asset already exists + RDB_ERROR_CHECK(forAsset( + common, asset_name, domain_id)); + + // check if domain exists + RDB_ERROR_CHECK(forDomain( + common, domain_id)); + } + + common.encode(command.precision()); + RDB_ERROR_CHECK(forAsset(common, asset_name, domain_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::CreateDomain &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + 
shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &domain_id = command.domainId(); + auto const &default_role = command.userDefaultRole(); + + if (do_validation) { + // no privilege escalation check here + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kCreateDomain)); + + // check if domain already exists + RDB_ERROR_CHECK(forDomain( + common, domain_id)); + + // check if role exists + RDB_ERROR_CHECK(forRole( + common, default_role)); + } + + uint64_t domains_count = 0ull; + if (auto result = + forDomainsTotalCount(common); + expected::hasValue(result) && result.assumeValue()) + domains_count = *result.assumeValue(); + + common.encode(domains_count + 1ull); + forDomainsTotalCount(common); + + common.valueBuffer().assign(default_role); + RDB_ERROR_CHECK(forDomain(common, domain_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::CreateRole &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &role_name = command.roleName(); + auto role_permissions = command.rolePermissions(); + if (role_permissions.isSet(Role::kRoot)) + role_permissions.setAll(); + + if (do_validation) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kCreateRole)); + + if (!role_permissions.isSubsetOf(creator_permissions)) + return makeError(ErrorCodes::kNoPermissions, + "Insufficient permissions"); + } + + // check if role already exists + if (auto result = forRole( + common, role_name); + expected::hasError(result)) + return makeError(ErrorCodes::kRoleAlreadyExists, "Already exists."); + + 
common.valueBuffer().assign(role_permissions.toBitstring()); + RDB_ERROR_CHECK(forRole(common, role_name)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::DetachRole &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + auto const &role_name = command.roleName(); + + if (do_validation) + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kDetachRole)); + + RDB_ERROR_CHECK( + forRole(common, role_name)); + + if (do_validation) + RDB_ERROR_CHECK(forAccountRole( + common, account_name, domain_id, role_name)); + + RDB_ERROR_CHECK(forAccountRole( + common, account_name, domain_id, role_name)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::GrantPermission &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + + auto const granted_perm = command.permissionName(); + auto const required_perm = + shared_model::interface::permissions::permissionFor(granted_perm); + + if (do_validation) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, required_perm)); + + // check if account exists + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + } + + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + opt_permissions, 
+ forGrantablePermissions( + common, account_name, domain_id, creator_account_id)); + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + // check if already granted + if (granted_account_permissions.isSet(granted_perm)) + return makeError(ErrorCodes::kPermissionIsAlreadySet, + "Permission is already set."); + + granted_account_permissions.set(granted_perm); + common.valueBuffer().assign(granted_account_permissions.toBitstring()); + RDB_ERROR_CHECK( + forGrantablePermissions( + common, account_name, domain_id, creator_account_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::RemovePeer &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + if (command.pubkey().empty()) + return makeError(ErrorCodes::kPublicKeyIsEmpty, "Pubkey empty."); + + if (do_validation) + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kRemovePeer)); + + std::string pk; + toLowerAppend(command.pubkey(), pk); + + RDB_ERROR_CHECK( + forPeerAddress(common, pk)); + + RDB_TRY_GET_VALUE( + opt_peers_count, + forPeersCount(common)); + if (*opt_peers_count == 1ull) + return makeError( + ErrorCodes::kPeersCountIsNotEnough, "Can not remove last peer {}.", pk); + + common.encode(*opt_peers_count - 1ull); + RDB_ERROR_CHECK(forPeersCount(common)); + + RDB_ERROR_CHECK( + forPeerAddress(common, pk)); + RDB_ERROR_CHECK( + forPeerTLS(common, pk)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::RemoveSignatory &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + 
shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + + uint64_t quorum; + if (auto result = forQuorum( + common, account_name, domain_id); + expected::hasError(result)) + return makeError(ErrorCodes::kNoAccount, + std::move(result.assumeError())); + else + quorum = *result.assumeValue(); + + if (do_validation) { + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + opt_permissions, + forGrantablePermissions( + common, + creator_account_name, + creator_domain_id, + command.accountId())); + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + if (creator_account_id == command.accountId()) { + RDB_ERROR_CHECK( + checkPermissions(creator_permissions, Role::kRemoveSignatory)); + } else { + RDB_ERROR_CHECK(checkGrantablePermissions(creator_permissions, + granted_account_permissions, + Grantable::kRemoveMySignatory)); + } + } + + std::string pk; + toLowerAppend(command.pubkey(), pk); + + if (auto result = forSignatory( + common, account_name, domain_id, pk); + expected::hasError(result)) + return makeError(ErrorCodes::kNoSignatory, + std::move(result.assumeError())); + + uint64_t counter = 0; + auto status = enumerateKeys(common, + [&](auto key) { + ++counter; + return true; + }, + fmtstrings::kPathSignatories, + domain_id, + account_name); + + if (counter <= quorum) + return makeError( + ErrorCodes::kCountNotEnough, + "Remove signatory {} for account {} with quorum {} failed.", + pk, + command.accountId(), + quorum); + + RDB_ERROR_CHECK(forSignatory( + common, account_name, domain_id, pk)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const 
shared_model::interface::RevokePermission &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + auto const revoked_perm = command.permissionName(); + + if (do_validation) { + // check if account exists + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + } + + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + opt_permissions, + forGrantablePermissions( + common, account_name, domain_id, creator_account_id)); + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + // check if not granted + if (!granted_account_permissions.isSet(revoked_perm)) + return makeError(ErrorCodes::kNoPermissions, "Permission not set"); + + granted_account_permissions.unset(revoked_perm); + common.valueBuffer().assign(granted_account_permissions.toBitstring()); + RDB_ERROR_CHECK( + forGrantablePermissions( + common, account_name, domain_id, creator_account_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::SetAccountDetail &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + + if (do_validation) { + if (command.accountId() != creator_account_id) { + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + 
opt_permissions, + forGrantablePermissions( + common, + creator_account_name, + creator_domain_id, + command.accountId())); + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + RDB_ERROR_CHECK(checkPermissions(creator_permissions, + granted_account_permissions, + Role::kSetDetail, + Grantable::kSetMyAccountDetail)); + } + + // check if account exists + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + } + + RDB_TRY_GET_VALUE( + opt_detail, + forAccountDetail( + common, + account_name, + domain_id, + !creator_account_id.empty() ? creator_account_id : "genesis", + command.key())); + + common.valueBuffer().assign(command.value()); + RDB_ERROR_CHECK(forAccountDetail( + common, + account_name, + domain_id, + !creator_account_id.empty() ? creator_account_id : "genesis", + command.key())); + + if (!opt_detail) { + RDB_TRY_GET_VALUE( + opt_acc_details_count, + forAccountDetailsCount( + common, account_name, domain_id)); + const uint64_t count = + opt_acc_details_count ? 
*opt_acc_details_count : 0ull; + + common.encode(count + 1ull); + RDB_ERROR_CHECK(forAccountDetailsCount( + common, account_name, domain_id)); + } + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::SetQuorum &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[account_name, domain_id] = staticSplitId<2>(command.accountId()); + + if (do_validation) { + // check if account exists + RDB_ERROR_CHECK(forAccount( + common, account_name, domain_id)); + + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + opt_permissions, + forGrantablePermissions( + common, + creator_account_name, + creator_domain_id, + command.accountId())); + + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + RDB_ERROR_CHECK(checkPermissions(creator_permissions, + granted_account_permissions, + Role::kSetQuorum, + Grantable::kSetMyQuorum)); + } + + /// TODO(iceseer): check if is better to store addition value with counter + int counter = 0; + auto status = enumerateKeys(common, + [&](auto key) { + ++counter; + return true; + }, + fmtstrings::kPathSignatories, + domain_id, + account_name); + + if (command.newQuorum() > counter) + return makeError(ErrorCodes::kCountNotEnough, + "Quorum value more than signatories. 
{}", + command.toString()); + + common.encode(command.newQuorum()); + RDB_ERROR_CHECK( + forQuorum(common, account_name, domain_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::SubtractAssetQuantity &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + // TODO(iceseer): fix the case there will be no delimiter + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[asset_name, domain_id] = staticSplitId<2>(command.assetId()); + auto const &amount = command.amount(); + + if (do_validation) + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + creator_permissions, + Role::kSubtractAssetQty, + Role::kSubtractDomainAssetQty)); + + // check if asset exists + RDB_TRY_GET_VALUE(opt_result, + forAsset( + common, asset_name, domain_id)); + + if (*opt_result < command.amount().precision()) + return makeError( + 3, + "Invalid precision of asset: {} from: {}. 
Expected: {}, but got: {}", + command.assetId(), + creator_account_id, + *opt_result, + command.amount().precision()); + + shared_model::interface::Amount result(*opt_result); + RDB_TRY_GET_VALUE( + opt_amount, + forAccountAsset( + common, creator_account_name, creator_domain_id, command.assetId())); + if (opt_amount) + result = std::move(*opt_amount); + + result -= amount; + common.valueBuffer().assign(result.toStringRepr()); + if (common.valueBuffer()[0] == 'N') + return makeError(ErrorCodes::kInvalidAmount, + "Invalid {} amount {} from {}", + command.toString(), + result.toString(), + creator_account_id); + + RDB_ERROR_CHECK(forAccountAsset( + common, creator_account_name, creator_domain_id, command.assetId())); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::TransferAsset &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType /*cmd_index*/, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2>(creator_account_id); + auto const &[source_account_name, source_domain_id] = + staticSplitId<2>(command.srcAccountId()); + auto const &[destination_account_name, destination_domain_id] = + staticSplitId<2>(command.destAccountId()); + auto const &[asset_name, domain_id] = staticSplitId<2>(command.assetId()); + auto const &amount = command.amount(); + auto const &description = command.description(); + + // check if destination account exists + RDB_ERROR_CHECK(forAccount( + common, destination_account_name, destination_domain_id)); + + // check if source account exists + RDB_ERROR_CHECK(forAccount( + common, source_account_name, source_domain_id)); + + if (do_validation) { + // get account permissions + RDB_TRY_GET_VALUE( + 
destination_permissions, + accountPermissions( + common, destination_account_name, destination_domain_id)); + if (!destination_permissions.isSet(Role::kReceive)) + return makeError(ErrorCodes::kNoPermissions, + "Not enough permissions. {}", + command.toString()); + + if (command.srcAccountId() != creator_account_id) { + GrantablePermissionSet granted_account_permissions; + RDB_TRY_GET_VALUE( + opt_permissions, + forGrantablePermissions( + common, + creator_account_name, + creator_domain_id, + command.srcAccountId())); + + if (opt_permissions) + granted_account_permissions = *opt_permissions; + + RDB_ERROR_CHECK(checkPermissions(creator_permissions, + granted_account_permissions, + Role::kTransfer, + Grantable::kTransferMyAssets)); + } else + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kTransfer)); + + // check if asset exists + RDB_ERROR_CHECK(forAsset( + common, asset_name, domain_id)); + + auto status = common.get(fmtstrings::kSetting, + iroha::ametsuchi::kMaxDescriptionSizeKey); + RDB_ERROR_CHECK(canExist( + status, [&] { return fmt::format("Max description size key"); })); + + if (status.ok()) { + uint64_t max_description_size; + common.decode(max_description_size); + if (description.size() > max_description_size) + return makeError(ErrorCodes::kInvalidFieldSize, + "Too big description"); + } + } + + RDB_TRY_GET_VALUE( + opt_source_balance, + forAccountAsset( + common, source_account_name, source_domain_id, command.assetId())); + shared_model::interface::Amount source_balance( + std::move(*opt_source_balance)); + + source_balance -= amount; + if (source_balance.toStringRepr()[0] == 'N') + return makeError(ErrorCodes::kNotEnoughAssets, "Not enough assets"); + + RDB_TRY_GET_VALUE( + opt_account_asset_size, + forAccountAssetSize( + common, destination_account_name, destination_domain_id)); + uint64_t account_asset_size = + opt_account_asset_size ? 
*opt_account_asset_size : 0ull; + + shared_model::interface::Amount destination_balance( + source_balance.precision()); + + RDB_TRY_GET_VALUE(opt_amount, + forAccountAsset( + common, + destination_account_name, + destination_domain_id, + command.assetId())); + + if (opt_amount) + destination_balance = *opt_amount; + else + ++account_asset_size; + + destination_balance += amount; + if (destination_balance.toStringRepr()[0] == 'N') + return makeError(ErrorCodes::kIncorrectBalance, "Incorrect balance"); + + common.valueBuffer().assign(source_balance.toStringRepr()); + RDB_ERROR_CHECK(forAccountAsset( + common, source_account_name, source_domain_id, command.assetId())); + + common.valueBuffer().assign(destination_balance.toStringRepr()); + RDB_ERROR_CHECK(forAccountAsset(common, + destination_account_name, + destination_domain_id, + command.assetId())); + + common.encode(account_asset_size); + RDB_ERROR_CHECK(forAccountAssetSize( + common, destination_account_name, destination_domain_id)); + + return {}; +} + +RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( + RocksDbCommon &common, + const shared_model::interface::SetSettingValue &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &, + shared_model::interface::types::CommandIndexType, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &key = command.key(); + auto const &value = command.value(); + + common.valueBuffer().assign(value); + RDB_ERROR_CHECK(forSettings(common, key)); + + return {}; +} diff --git a/irohad/ametsuchi/impl/rocksdb_command_executor.hpp b/irohad/ametsuchi/impl/rocksdb_command_executor.hpp new file mode 100644 index 00000000000..b272c73b526 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_command_executor.hpp @@ -0,0 +1,282 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_COMMAND_EXECUTOR_HPP +#define IROHA_ROCKSDB_COMMAND_EXECUTOR_HPP + +#include + +#include +#include "ametsuchi/command_executor.hpp" +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "ametsuchi/impl/rocksdb_db_transaction.hpp" +#include "interfaces/permissions.hpp" + +namespace rocksdb { + class Transaction; +} + +namespace shared_model::interface { + class AddAssetQuantity; + class AddPeer; + class AddSignatory; + class AppendRole; + class CompareAndSetAccountDetail; + class CallEngine; + class CreateAccount; + class CreateAsset; + class CreateDomain; + class CreateRole; + class DetachRole; + class GrantPermission; + class PermissionToString; + class RemovePeer; + class RemoveSignatory; + class RevokePermission; + class SetAccountDetail; + class SetQuorum; + class SubtractAssetQuantity; + class TransferAsset; + class SetSettingValue; +} // namespace shared_model::interface + +namespace iroha::ametsuchi { + + class VmCaller; + + class RocksDbCommandExecutor final : public CommandExecutor { + public: + using ExecutionResult = expected::Result; + + enum ErrorCodes { + kNoPermissions = 2, + kNoAccount = 3, + kInvalidAmount = 3, + kRoleAlreadyExists = 3, + kSignatoryMustNotExist = 3, + kInvalidAssetAmount = 4, + kIncorrectOldValue = 4, + kPeersCountIsNotEnough = 4, + kNoSignatory = 4, + kCountNotEnough = 5, + kNotEnoughAssets = 6, + kIncorrectBalance = 7, + kException = 1002, + kNoImplementation = 1005, + kPermissionIsAlreadySet = 1007, + kPublicKeyIsEmpty = 1008, + kInvalidFieldSize = 1009, + }; + + RocksDbCommandExecutor( + std::shared_ptr db_context, + std::shared_ptr + perm_converter, + std::optional> vm_caller); + + ~RocksDbCommandExecutor(); + + CommandResult execute( + const shared_model::interface::Command &cmd, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool 
do_validation) override; + + void skipChanges() override; + DatabaseTransaction &dbSession() override; + std::shared_ptr getSession(); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::AddAssetQuantity &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::AddPeer &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::AddSignatory &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::CallEngine &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::AppendRole &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const 
&creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::CompareAndSetAccountDetail &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::CreateAccount &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::CreateAsset &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::CreateDomain &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::CreateRole &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const 
shared_model::interface::DetachRole &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GrantPermission &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::RemovePeer &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::RemoveSignatory &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::RevokePermission &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::SetAccountDetail &command, + const shared_model::interface::types::AccountIdType 
&creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::SetQuorum &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::SubtractAssetQuantity &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::TransferAsset &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &tx_hash, + shared_model::interface::types::CommandIndexType cmd_index, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::SetSettingValue &command, + const shared_model::interface::types::AccountIdType &creator_account_id, + const std::string &, + shared_model::interface::types::CommandIndexType, + bool do_validation, + shared_model::interface::RolePermissionSet const &creator_permissions); + + private: + std::shared_ptr db_context_; + std::shared_ptr + perm_converter_; + std::optional> vm_caller_; + RocksDbTransaction db_transaction_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_COMMAND_EXECUTOR_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_common.hpp 
b/irohad/ametsuchi/impl/rocksdb_common.hpp new file mode 100644 index 00000000000..b7922591f57 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_common.hpp @@ -0,0 +1,1768 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_COMMON_HPP +#define IROHA_ROCKSDB_COMMON_HPP + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "ametsuchi/impl/executor_common.hpp" +#include "common/irohad_version.hpp" +#include "common/result.hpp" +#include "interfaces/common_objects/amount.hpp" +#include "interfaces/common_objects/types.hpp" +#include "interfaces/permissions.hpp" + +// clang-format off +/** + * RocksDB data structure. + * + * |ROOT|-+-|STORE|-+- + * | +- + * | +- + * | +- + * | +- + * | + * +-|WSV|-+-|NETWORK|-+-|PEERS|-+-|ADDRESS|-+- + * | | | +- + * | | | + * | | +-|TLS|-+- + * | | | +- + * | | | + * | | +- + * | | + * | +-|STORE|-+- + * | + * +-|SETTINGS|-+- + * | +- + * | +- + * | + * +-|ROLES|-+- + * | +- + * | +- + * | + * +-|TRANSACTIONS|-+-|ACCOUNTS|-+--+-|POSITION|-+- + * | | | | +- + * | | | | +- + * | | | | + * | | | +-|TIMESTAMP|-+- + * | | | | +- + * | | | | +- + * | | | | + * | | | +- + * | | | + * | | +--+-|POSITION|-+- + * | | | +- + * | | | +- + * | | | + * | | +-|TIMESTAMP|-+- + * | | | +- + * | | | +- + * | | | + * | | +- + * | | + * | +-|STATUSES|-+- + * | | +- + * | | + * | +- + * | + * +-|DOMAIN|-+-|DOMAIN_1|-+-|ASSETS|-+- + * | | | +- + * | | | + * | | +-|ACCOUNTS|-|NAME_1|-+-|ASSETS|-+- + * | | | +- + * | | | + * | | +-|OPTIONS|-+- + * | | | +- + * | | | +- + * | | | + * | | +-|DETAILS|-+-- + * | | | + * | | +-|ROLES|-+- + * | | | +- + * | | | + * | | +-|GRANTABLE_PER|-+- + * | | | +- + * | | | + * | | +-|SIGNATORIES|-+- + * | | +- + * | | + * | +- + * | +- + * | + * +- + * + * + * ###################################### + * ############# LEGEND MAP ############# + * ###################################### + * + * 
###################################### + * ### Directory ## Mnemonics ### + * ###################################### + * ### DELIMITER ## / ### + * ### ROOT ## ### + * ### STORE ## s ### + * ### WSV ## w ### + * ### NETWORK ## n ### + * ### SETTINGS ## i ### + * ### ASSETS ## x ### + * ### ROLES ## r ### + * ### TRANSACTIONS ## t ### + * ### ACCOUNTS ## a ### + * ### PEERS ## p ### + * ### STATUSES ## u ### + * ### DETAILS ## d ### + * ### GRANTABLE_PER ## g ### + * ### POSITION ## P ### + * ### TIMESTAMP ## T ### + * ### DOMAIN ## D ### + * ### SIGNATORIES ## S ### + * ### OPTIONS ## O ### + * ### ADDRESS ## M ### + * ### TLS ## N ### + * ###################################### + * + * ###################################### + * ### File ## Mnemonics ### + * ###################################### + * ### F_QUORUM ## q ### + * ### F_ASSET SIZE ## I ### + * ### F_TOP BLOCK ## Q ### + * ### F_PEERS COUNT ## Z ### + * ### F_TOTAL COUNT ## V ### + * ### F_VERSION ## v ### + * ###################################### + * + * ###################################### + * ############# EXAMPLE ################ + * ###################################### + * + * GetAccountTransactions(ACCOUNT, TS) -> KEY: wta/ACCOUNT/T/TS/ + * GetAccountAssets(DOMAIN,ACCOUNT) -> KEY: wD/DOMAIN/a/ACCOUNT/x + */ +// clang-format on + +#define RDB_DELIMITER "/" +#define RDB_XXX RDB_DELIMITER "{}" RDB_DELIMITER + +#define RDB_ROOT "" +#define RDB_STORE "s" +#define RDB_WSV "w" +#define RDB_NETWORK "n" +#define RDB_SETTINGS "i" +#define RDB_ASSETS "x" +#define RDB_ROLES "r" +#define RDB_TRANSACTIONS "t" +#define RDB_ACCOUNTS "a" +#define RDB_PEERS "p" +#define RDB_STATUSES "u" +#define RDB_DETAILS "d" +#define RDB_GRANTABLE_PER "g" +#define RDB_POSITION "P" +#define RDB_TIMESTAMP "T" +#define RDB_DOMAIN "D" +#define RDB_SIGNATORIES "S" +#define RDB_OPTIONS "O" +#define RDB_ADDRESS "M" +#define RDB_TLS "N" + +#define RDB_F_QUORUM "q" +#define RDB_F_ASSET_SIZE "I" +#define RDB_F_TOP_BLOCK "Q" +#define 
RDB_F_PEERS_COUNT "Z" +#define RDB_F_TOTAL_COUNT "V" +#define RDB_F_VERSION "v" + +#define RDB_PATH_DOMAIN RDB_ROOT /**/ RDB_WSV /**/ RDB_DOMAIN /**/ RDB_XXX +#define RDB_PATH_ACCOUNT RDB_PATH_DOMAIN /**/ RDB_ACCOUNTS /**/ RDB_XXX + +namespace iroha::ametsuchi::fmtstrings { + static constexpr size_t kDelimiterSize = + sizeof(RDB_DELIMITER) / sizeof(RDB_DELIMITER[0]) - 1ull; + + static constexpr size_t kDelimiterCountForAField = 2ull; + + static const std::string kDelimiter{RDB_DELIMITER}; + + /** + * ###################################### + * ############## PATHS ################# + * ###################################### + */ + // domain_id/account_name + static auto constexpr kPathAccountRoles{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_ROLES)}; + + static auto constexpr kPathWsv{FMT_STRING(RDB_ROOT /**/ RDB_WSV)}; + + static auto constexpr kPathStore{FMT_STRING(RDB_ROOT /**/ RDB_STORE)}; + + // domain_id/account_name + static auto constexpr kPathAccount{FMT_STRING(RDB_PATH_ACCOUNT)}; + + // no params + static auto constexpr kPathPeers{FMT_STRING( + RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ RDB_ADDRESS)}; + + // domain_id/account_name + static auto constexpr kPathSignatories{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_SIGNATORIES)}; + + // no param + static auto constexpr kPathRoles{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_ROLES)}; + + // account + static auto constexpr kPathTransactionByTs{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ + RDB_ACCOUNTS /**/ RDB_XXX /**/ RDB_TIMESTAMP)}; + + // account + static auto constexpr kPathTransactionByPosition{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ + RDB_ACCOUNTS /**/ RDB_XXX /**/ RDB_POSITION)}; + + // domain_id/account_name ➡️ value + static auto constexpr kPathAccountDetail{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_DETAILS)}; + + // account_domain_id/account_name/asset_id + static auto constexpr kPathAccountAssets{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_ASSETS)}; + + 
/** + * ###################################### + * ############# FOLDERS ################ + * ###################################### + */ + // height ➡️ block data + static auto constexpr kBlockDataInStore{ + FMT_STRING(RDB_ROOT /**/ RDB_STORE /**/ RDB_XXX)}; + + // account/height/index/ts ➡️ tx_hash + static auto constexpr kTransactionByPosition{FMT_STRING( + RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ RDB_ACCOUNTS /**/ + RDB_XXX /**/ RDB_POSITION /**/ RDB_XXX /**/ RDB_XXX /**/ RDB_XXX)}; + + // account/ts/height/index ➡️ tx_hash + static auto constexpr kTransactionByTs{FMT_STRING( + RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ RDB_ACCOUNTS /**/ + RDB_XXX /**/ RDB_TIMESTAMP /**/ RDB_XXX /**/ RDB_XXX /**/ RDB_XXX)}; + + // account/height ➡️ tx_hash + static auto constexpr kTransactionByHeight{FMT_STRING( + RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ RDB_ACCOUNTS /**/ + RDB_XXX /**/ RDB_POSITION /**/ RDB_XXX)}; + + // account/ts/height/index ➡️ tx_hash + static auto constexpr kTransactionByTsLowerBound{FMT_STRING( + RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ RDB_ACCOUNTS /**/ + RDB_XXX /**/ RDB_TIMESTAMP /**/ RDB_XXX)}; + + // tx_hash ➡️ status + static auto constexpr kTransactionStatus{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ + RDB_STATUSES /**/ RDB_XXX)}; + + // domain_id/account_name/role_name + static auto constexpr kAccountRole{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_ROLES /**/ RDB_XXX)}; + + // role_name ➡️ permissions + static auto constexpr kRole{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_ROLES /**/ + RDB_XXX)}; + + // domain_id/account_name/pubkey ➡️ "" + static auto constexpr kSignatory{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_SIGNATORIES /**/ RDB_XXX)}; + + // domain_id/asset_name ➡️ precision + static auto constexpr kAsset{ + FMT_STRING(RDB_PATH_DOMAIN /**/ RDB_ASSETS /**/ RDB_XXX)}; + + // account_domain_id/account_name/asset_id ➡️ amount + static auto constexpr kAccountAsset{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ 
RDB_ASSETS /**/ RDB_XXX)}; + + // domain_id/account_name/writer_id/key ➡️ + // value + static auto constexpr kAccountDetail{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_DETAILS /**/ RDB_XXX /**/ RDB_XXX)}; + + // pubkey ➡️ address + static auto constexpr kPeerAddress{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ + RDB_ADDRESS /**/ RDB_XXX)}; + + // pubkey ➡️ tls + static auto constexpr kPeerTLS{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ + RDB_TLS /**/ RDB_XXX)}; + + // domain_id/account_name/grantee_domain_id/grantee_account_name + // ➡️ permissions + static auto constexpr kGranted{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_GRANTABLE_PER /**/ RDB_XXX)}; + + // key ➡️ value + static auto constexpr kSetting{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_SETTINGS /**/ RDB_XXX)}; + + /** + * ###################################### + * ############## FILES ################# + * ###################################### + */ + // domain_id ➡️ default role + static auto constexpr kDomain{FMT_STRING(RDB_PATH_DOMAIN)}; + + // "" ➡️ height # hash + static auto constexpr kTopBlock{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_STORE /**/ + RDB_F_TOP_BLOCK)}; + + // domain_id/account_name + static auto constexpr kQuorum{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_OPTIONS /**/ RDB_F_QUORUM)}; + + // account_domain_id/account_name ➡️ size + static auto constexpr kAccountAssetSize{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_OPTIONS /**/ RDB_F_ASSET_SIZE)}; + + static auto constexpr kPeersCount{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ + RDB_F_PEERS_COUNT)}; + + // account ➡️ txs total count + static auto constexpr kTxsTotalCount{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ + RDB_ACCOUNTS /**/ RDB_XXX /**/ RDB_F_TOTAL_COUNT)}; + + // ➡️ value + static auto constexpr kBlocksTotalCount{ + FMT_STRING(RDB_ROOT /**/ RDB_STORE /**/ RDB_F_TOTAL_COUNT)}; + + // ➡️ txs total count + static 
auto constexpr kAllTxsTotalCount{FMT_STRING( + RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ RDB_F_TOTAL_COUNT)}; + + // ➡️ domains total count + static auto constexpr kDomainsTotalCount{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_DOMAIN /**/ RDB_F_TOTAL_COUNT)}; + + // domain_id/account_name/ ➡️ value + static auto constexpr kAccountDetailsCount{ + FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_OPTIONS /**/ RDB_F_TOTAL_COUNT)}; + + // ➡️ value + static auto constexpr kStoreVersion{ + FMT_STRING(RDB_ROOT /**/ RDB_STORE /**/ RDB_F_VERSION)}; + + // ➡️ value + static auto constexpr kWsvVersion{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_F_VERSION)}; + +} // namespace iroha::ametsuchi::fmtstrings + +#undef RDB_ADDRESS +#undef RDB_TLS +#undef RDB_OPTIONS +#undef RDB_F_ASSET_SIZE +#undef RDB_PATH_DOMAIN +#undef RDB_PATH_ACCOUNT +#undef RDB_F_QUORUM +#undef RDB_DELIMITER +#undef RDB_ROOT +#undef RDB_STORE +#undef RDB_WSV +#undef RDB_NETWORK +#undef RDB_SETTINGS +#undef RDB_ASSETS +#undef RDB_ROLES +#undef RDB_TRANSACTIONS +#undef RDB_ACCOUNTS +#undef RDB_PEERS +#undef RDB_STATUSES +#undef RDB_DETAILS +#undef RDB_GRANTABLE_PER +#undef RDB_POSITION +#undef RDB_TIMESTAMP +#undef RDB_DOMAIN +#undef RDB_SIGNATORIES +#undef RDB_ITEM +#undef RDB_F_TOP_BLOCK +#undef RDB_F_PEERS_COUNT +#undef RDB_F_TOTAL_COUNT +#undef RDB_F_VERSION + +namespace { + auto constexpr kValue{FMT_STRING("{}")}; +} + +namespace iroha::ametsuchi { + + struct RocksDBPort; + class RocksDbCommon; + + /** + * RocksDB transaction context. 
+ */ + struct RocksDBContext { + RocksDBContext(RocksDBContext const &) = delete; + RocksDBContext &operator=(RocksDBContext const &) = delete; + + explicit RocksDBContext(std::shared_ptr dbp) + : db_port(std::move(dbp)) { + assert(db_port); + } + + private: + friend class RocksDbCommon; + friend struct RocksDBPort; + + /// RocksDB transaction + std::unique_ptr transaction; + + /// Buffer for key data + fmt::memory_buffer key_buffer; + + /// Buffer for value data + std::string value_buffer; + + /// Database port + std::shared_ptr db_port; + + /// Mutex to guard multithreaded access to this context + std::mutex this_context_cs; + }; + + enum DbErrorCode { + kOk = 0, + kErrorNoPermissions = 2, + kNotFound = 3, + kNoAccount = 3, + kMustNotExist = 4, + kNoRoles = 4, + kInvalidPagination = 4, + kInvalidStatus = 12, + kInitializeFailed = 15, + kOperationFailed = 16, + }; + + /// Db errors structure + struct DbError final { + uint32_t code; + std::string description; + }; + + template + inline expected::Result makeError(uint32_t code, + char const *format, + Args &&... args) { + assert(format != nullptr); + return expected::makeError( + DbError{code, fmt::format(format, std::forward(args)...)}); + } + + template + inline expected::Result makeError(uint32_t code, DbError &&e) { + return expected::makeError(DbError{code, std::move(e.description)}); + } + + /** + * Port to provide access to RocksDB instance. 
+ */ + struct RocksDBPort { + RocksDBPort(RocksDBPort const &) = delete; + RocksDBPort &operator=(RocksDBPort const &) = delete; + RocksDBPort() = default; + + expected::Result initialize(std::string const &db_name) { + if (db_name_) { + assert(*db_name_ == db_name); + return {}; + } + + rocksdb::Options options; + options.create_if_missing = true; + + rocksdb::OptimisticTransactionDB *transaction_db; + auto status = rocksdb::OptimisticTransactionDB::Open( + options, db_name, &transaction_db); + + if (!status.ok()) + return makeError(DbErrorCode::kInitializeFailed, + "Db {} initialization failed with status: {}.", + db_name, + status.ToString()); + + transaction_db_.reset(transaction_db); + db_name_ = db_name; + return {}; + } + + private: + std::unique_ptr transaction_db_; + std::optional db_name_; + friend class RocksDbCommon; + + void prepareTransaction(RocksDBContext &tx_context) { + assert(transaction_db_); + tx_context.transaction.reset( + transaction_db_->BeginTransaction(rocksdb::WriteOptions())); + } + }; + +#define RDB_ERROR_CHECK(...) \ + if (auto _tmp_gen_var = (__VA_ARGS__); \ + iroha::expected::hasError(_tmp_gen_var)) \ + return _tmp_gen_var.assumeError() + +#define RDB_ERROR_CHECK_TO_STR(...) \ + if (auto _tmp_gen_var = (__VA_ARGS__); \ + iroha::expected::hasError(_tmp_gen_var)) \ + return _tmp_gen_var.assumeError().description + +#define RDB_TRY_GET_VALUE(name, ...) \ + typename decltype(__VA_ARGS__)::ValueInnerType name; \ + if (auto _tmp_gen_var = (__VA_ARGS__); \ + iroha::expected::hasError(_tmp_gen_var)) \ + return _tmp_gen_var.assumeError(); \ + else \ + name = std::move(_tmp_gen_var.assumeValue()) + +#define RDB_TRY_GET_VALUE_OR_STR_ERR(name, ...) 
\ + typename decltype(__VA_ARGS__)::ValueInnerType name; \ + if (auto _tmp_gen_var = (__VA_ARGS__); \ + iroha::expected::hasError(_tmp_gen_var)) \ + return _tmp_gen_var.assumeError().description; \ + else \ + name = std::move(_tmp_gen_var.assumeValue()) + + /** + * Base functions to interact with RocksDB data. + */ + class RocksDbCommon { + public: + explicit RocksDbCommon(std::shared_ptr tx_context) + : tx_context_(std::move(tx_context)), + context_guard_(tx_context_->this_context_cs) { + assert(tx_context_); + assert(tx_context_->db_port); + } + + /// Get value buffer + auto &valueBuffer() { + return tx_context_->value_buffer; + } + + /// Get key buffer + auto &keyBuffer() { + return tx_context_->key_buffer; + } + + private: + auto &transaction() { + if (!tx_context_->transaction) + tx_context_->db_port->prepareTransaction(*tx_context_); + return tx_context_->transaction; + } + + [[nodiscard]] bool isTransaction() const { + return tx_context_->transaction != nullptr; + } + + /// Iterate over all the keys begins from it, and matches a prefix from + /// keybuffer and call lambda with key-value. To stop enumeration callback F + /// must return false. 
+ template + auto enumerate(std::unique_ptr &it, F &&func) { + if (!it->status().ok()) + return it->status(); + + rocksdb::Slice const key(keyBuffer().data(), keyBuffer().size()); + for (; it->Valid() && it->key().starts_with(key); it->Next()) + if (!std::forward(func)(it, key.size())) + break; + + return it->status(); + } + + public: + /// Makes commit to DB + auto commit() { + rocksdb::Status status; + if (isTransaction()) + status = transaction()->Commit(); + + transaction().reset(); + return status; + } + + /// Rollback all transaction changes + auto rollback() { + rocksdb::Status status; + if (isTransaction()) + status = transaction()->Rollback(); + + transaction().reset(); + return status; + } + + auto release() { + rocksdb::Status status; + if (isTransaction()) + status = transaction()->PopSavePoint(); + return status; + } + + /// Prepare tx for 2pc + auto prepare() { + rocksdb::Status status; + if (isTransaction()) + status = transaction()->Prepare(); + return status; + } + + /// Skips all changes made in this transaction + void skip() { + if (isTransaction()) + transaction().reset(); + } + + /// Saves current state of a transaction + void savepoint() { + if (isTransaction()) + transaction()->SetSavePoint(); + } + + /// Restores to the previously saved savepoint + auto rollbackToSavepoint() { + rocksdb::Status status; + if (isTransaction()) + status = transaction()->RollbackToSavePoint(); + return status; + } + + /// Encode number into @see valueBuffer + auto encode(uint64_t number) { + valueBuffer().clear(); + fmt::format_to(std::back_inserter(valueBuffer()), kValue, number); + } + + /// Decode number from @see valueBuffer + auto decode(uint64_t &number) { + return std::from_chars(valueBuffer().data(), + valueBuffer().data() + valueBuffer().size(), + number); + } + + /// Read data from database to @see valueBuffer + template + auto get(S const &fmtstring, Args &&... 
args) { + keyBuffer().clear(); + fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); + + valueBuffer().clear(); + return transaction()->Get( + rocksdb::ReadOptions(), + rocksdb::Slice(keyBuffer().data(), keyBuffer().size()), + &valueBuffer()); + } + + /// Put data from @see valueBuffer to database + template + auto put(S const &fmtstring, Args &&... args) { + keyBuffer().clear(); + fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); + + return transaction()->Put( + rocksdb::Slice(keyBuffer().data(), keyBuffer().size()), + valueBuffer()); + } + + /// Delete database entry by the key + template + auto del(S const &fmtstring, Args &&... args) { + keyBuffer().clear(); + fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); + + return transaction()->Delete( + rocksdb::Slice(keyBuffer().data(), keyBuffer().size())); + } + + /// Searches for the first key that matches a prefix + template + auto seek(S const &fmtstring, Args &&... args) { + keyBuffer().clear(); + fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); + + std::unique_ptr it( + transaction()->GetIterator(rocksdb::ReadOptions())); + it->Seek(rocksdb::Slice(keyBuffer().data(), keyBuffer().size())); + + return it; + } + + /// Iterate over all the keys begins from it, and matches a prefix and call + /// lambda with key-value. To stop enumeration callback F must return false. + template + auto enumerate(std::unique_ptr &it, + F &&func, + S const &fmtstring, + Args &&... args) { + keyBuffer().clear(); + fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); + return enumerate(it, std::forward(func)); + } + + /// Iterate over all the keys that matches a prefix and call lambda + /// with key-value. To stop enumeration callback F must return false. + template + auto enumerate(F &&func, S const &fmtstring, Args &&... 
args) { + auto it = seek(fmtstring, std::forward(args)...); + return enumerate(it, std::forward(func)); + } + + /// Removes range of items by key-filter + template + auto filterDelete(S const &fmtstring, Args &&... args) { + auto it = seek(fmtstring, std::forward(args)...); + if (!it->status().ok()) + return it->status(); + + rocksdb::Slice const key(keyBuffer().data(), keyBuffer().size()); + for (; it->Valid() && it->key().starts_with(key); it->Next()) + if (auto status = transaction()->Delete(it->key()); !status.ok()) + return status; + + return it->status(); + } + + private: + std::shared_ptr tx_context_; + std::lock_guard context_guard_; + }; + + /** + * Supported operations. + */ + enum struct kDbOperation { + kGet, /// read the value by the key + kCheck, /// check the entry exists by the key + kPut, /// put the value with the key + kDel /// delete entry by the key + }; + + /** + * DB operation result assertion. If the result is not matches the assertion + * than error will be generated + */ + enum struct kDbEntry { + kMustExist, /// entry must exist and data must be accessible + kMustNotExist, /// entry must NOT exist. If it exist than error will be + /// generated + kCanExist /// entry can exist or not. kDbOperation::kGet will return data + /// only if present, otherwise null-data + }; + + /// Enumerating through all the keys matched to prefix without reading value + template + inline auto enumerateKeys(RocksDbCommon &rdb, + F &&func, + S const &strformat, + Args &&... 
args) { + return rdb.enumerate( + [func{std::forward(func)}](auto const &it, + auto const prefix_size) mutable { + assert(it->Valid()); + auto const key = it->key(); + return std::forward(func)(rocksdb::Slice( + key.data() + prefix_size + fmtstrings::kDelimiterSize, + key.size() - prefix_size + - fmtstrings::kDelimiterCountForAField + * fmtstrings::kDelimiterSize)); + }, + strformat, + std::forward(args)...); + } + + template + inline auto makeKVLambda(F &&func) { + return [func{std::forward(func)}](auto const &it, + auto const prefix_size) mutable { + assert(it->Valid()); + auto const key = it->key(); + return std::forward(func)( + rocksdb::Slice(key.data() + prefix_size + fmtstrings::kDelimiterSize, + key.size() - prefix_size + - fmtstrings::kDelimiterCountForAField + * fmtstrings::kDelimiterSize), + it->value()); + }; + } + + /// Enumerating through all the keys matched to prefix and read the value + template + inline auto enumerateKeysAndValues(RocksDbCommon &rdb, + F &&func, + S const &strformat, + Args &&... args) { + return rdb.enumerate(makeKVLambda(std::forward(func)), + strformat, + std::forward(args)...); + } + + /// Enumerating through the keys, begins from it and matched to prefix and + /// read the value + template + inline auto enumerateKeysAndValues(RocksDbCommon &rdb, + F &&func, + std::unique_ptr &it, + S const &strformat, + Args &&... args) { + return rdb.enumerate(it, + makeKVLambda(std::forward(func)), + strformat, + std::forward(args)...); + } + + template + inline expected::Result mustNotExist( + rocksdb::Status const &status, F &&op_formatter) { + if (status.IsNotFound()) + return {}; + + if (!status.ok()) + return makeError(DbErrorCode::kInvalidStatus, + "{}. Failed with status: {}.", + std::forward(op_formatter)(), + status.ToString()); + + return makeError(DbErrorCode::kMustNotExist, + "{}. 
Must not exist.", + std::forward(op_formatter)()); + } + + template + inline expected::Result mustExist( + rocksdb::Status const &status, F &&op_formatter) { + if (status.IsNotFound()) + return makeError(DbErrorCode::kNotFound, + "{}. Was not found.", + std::forward(op_formatter)()); + + if (!status.ok()) + return makeError(DbErrorCode::kInvalidStatus, + "{}. Failed with status: {}.", + std::forward(op_formatter)(), + status.ToString()); + + return {}; + } + + template + inline expected::Result canExist(rocksdb::Status const &status, + F &&op_formatter) { + if (status.IsNotFound() || status.ok()) + return {}; + + return makeError(DbErrorCode::kInvalidStatus, + "{}. Failed with status: {}.", + std::forward(op_formatter)(), + status.ToString()); + } + + template + inline expected::Result checkStatus(rocksdb::Status status, + F &&op_formatter) { + if constexpr (kSc == kDbEntry::kMustExist) + return mustExist(status, std::forward(op_formatter)); + else if constexpr (kSc == kDbEntry::kMustNotExist) + return mustNotExist(status, std::forward(op_formatter)); + else if constexpr (kSc == kDbEntry::kCanExist) + return canExist(status, std::forward(op_formatter)); + + static_assert(kSc == kDbEntry::kMustExist || kSc == kDbEntry::kMustNotExist + || kSc == kDbEntry::kCanExist, + "Unexpected status check value"); + } + + template + inline expected::Result executeOperation( + RocksDbCommon &common, + OperationDescribtionF &&op_formatter, + Args &&... 
args) { + rocksdb::Status status; + if constexpr (kOp == kDbOperation::kGet || kOp == kDbOperation::kCheck) + status = common.get(std::forward(args)...); + else if constexpr (kOp == kDbOperation::kPut) + status = common.put(std::forward(args)...); + else if constexpr (kOp == kDbOperation::kDel) + status = common.del(std::forward(args)...); + + static_assert(kOp == kDbOperation::kGet || kOp == kDbOperation::kCheck + || kOp == kDbOperation::kPut || kOp == kDbOperation::kDel, + "Unexpected operation value!"); + + static_assert( + kOp != kDbOperation::kDel || kSc != kDbEntry::kMustExist, + "Delete operation does not report if key existed before deletion!"); + + RDB_ERROR_CHECK(checkStatus( + status, std::forward(op_formatter))); + return status; + } + + template ::value>> + inline std::optional loadValue( + RocksDbCommon &common, + expected::Result const &status) { + std::optional value; + if constexpr (kOp == kDbOperation::kGet) { + assert(expected::hasValue(status)); + if (status.assumeValue().ok()) { + uint64_t _; + common.decode(_); + value = _; + } + } + return value; + } + + template < + kDbOperation kOp, + typename T, + typename = std::enable_if_t::value>> + inline std::optional loadValue( + RocksDbCommon &common, + expected::Result const &status) { + std::optional value; + if constexpr (kOp == kDbOperation::kGet) { + assert(expected::hasValue(status)); + if (status.assumeValue().ok()) + value = common.valueBuffer(); + } + return value; + } + + template < + kDbOperation kOp, + typename T, + typename = std::enable_if_t< + std::is_same::value>> + inline std::optional loadValue( + RocksDbCommon &common, + expected::Result const &status) { + std::optional value; + if constexpr (kOp == kDbOperation::kGet) { + assert(expected::hasValue(status)); + if (status.assumeValue().ok()) + value = + shared_model::interface::RolePermissionSet{common.valueBuffer()}; + } + return value; + } + + template ::value>> + inline std::optional loadValue( + RocksDbCommon &common, + 
expected::Result const &status) { + std::optional value; + if constexpr (kOp == kDbOperation::kGet) { + assert(expected::hasValue(status)); + if (status.assumeValue().ok()) { + auto const &[major, minor, patch] = + staticSplitId<3ull>(common.valueBuffer(), "#"); + IrohadVersion version{0ul, 0ul, 0ul}; + std::from_chars( + major.data(), major.data() + major.size(), version.major); + std::from_chars( + minor.data(), minor.data() + minor.size(), version.minor); + std::from_chars( + patch.data(), patch.data() + patch.size(), version.patch); + value = version; + } + } + return value; + } + + template ::value>> + inline std::optional loadValue( + RocksDbCommon &common, + expected::Result const &status) { + std::optional value; + if constexpr (kOp == kDbOperation::kGet) { + assert(expected::hasValue(status)); + if (status.assumeValue().ok()) + value.emplace(common.valueBuffer()); + } + return value; + } + + template ::value>> + inline std::optional + loadValue(RocksDbCommon &common, + expected::Result const &status) { + std::optional value; + if constexpr (kOp == kDbOperation::kGet) { + assert(expected::hasValue(status)); + if (status.assumeValue().ok()) + value = shared_model::interface::GrantablePermissionSet{ + common.valueBuffer()}; + } + return value; + } + + template ::value>> + inline std::optional loadValue( + RocksDbCommon &common, + expected::Result const &status) { + std::optional value; + if constexpr (kOp == kDbOperation::kGet) { + assert(expected::hasValue(status)); + if (status.assumeValue().ok()) + value = true; + } + return value; + } + + template + inline expected::Result, DbError> dbCall( + RocksDbCommon &common, Args &&... args) { + auto status = executeOperation( + common, + [&] { return fmt::format(std::forward(args)...); }, + std::forward(args)...); + RDB_ERROR_CHECK(status); + return loadValue(common, status); + } + + /** + * Access to account details count. 
+ * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @return operation result + */ + template + inline expected::Result, DbError> + forAccountDetailsCount(RocksDbCommon &common, + std::string_view account, + std::string_view domain) { + return dbCall( + common, fmtstrings::kAccountDetailsCount, domain, account); + } + + /** + * Access to store version. + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @return operation result + */ + template + inline expected::Result, DbError> + forStoreVersion(RocksDbCommon &common) { + return dbCall(common, fmtstrings::kStoreVersion); + } + + /** + * Access to WSV version. + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @return operation result + */ + template + inline expected::Result, DbError> forWSVVersion( + RocksDbCommon &common) { + return dbCall(common, fmtstrings::kWsvVersion); + } + + /** + * Access to Stored blocks data. + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param height of the block + * @return operation result + */ + template + inline expected::Result, DbError> forBlock( + RocksDbCommon &common, uint64_t height) { + return dbCall( + common, fmtstrings::kBlockDataInStore, height); + } + + /** + * Access to Block store size. + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @return operation result + */ + template + inline expected::Result, DbError> forBlocksTotalCount( + RocksDbCommon &common) { + return dbCall(common, fmtstrings::kBlocksTotalCount); + } + + /** + * Access to account quorum file. 
+ * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @return operation result + */ + template + inline expected::Result, DbError> forQuorum( + RocksDbCommon &common, + std::string_view account, + std::string_view domain) { + return dbCall( + common, fmtstrings::kQuorum, domain, account); + } + + /** + * Access to account's txs total count. + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param account_id name + * @return operation result + */ + template + inline expected::Result, DbError> forTxsTotalCount( + RocksDbCommon &common, std::string_view account_id) { + return dbCall( + common, fmtstrings::kTxsTotalCount, account_id); + } + + /** + * Access to all txs total count. + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @return operation result + */ + template + inline expected::Result, DbError> forTxsTotalCount( + RocksDbCommon &common) { + return dbCall(common, fmtstrings::kAllTxsTotalCount); + } + + /** + * Access to domains total count. 
+ * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @return operation result + */ + template + inline expected::Result, DbError> + forDomainsTotalCount(RocksDbCommon &common) { + return dbCall(common, fmtstrings::kDomainsTotalCount); + } + + /** + * Access to account folder + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @return operation result + */ + template + inline auto forAccount(RocksDbCommon &common, + std::string_view account, + std::string_view domain) { + return forQuorum(common, account, domain); + } + + /** + * Access to role file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param role id + * @return operation result + */ + template + inline expected:: + Result, DbError> + forRole(RocksDbCommon &common, std::string_view role) { + return dbCall( + common, fmtstrings::kRole, role); + } + + /** + * Access to peers count file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @return operation result + */ + template + inline expected::Result, DbError> forPeersCount( + RocksDbCommon &common) { + return dbCall(common, fmtstrings::kPeersCount); + } + + /** + * Access to transactions statuses + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param tx_hash is a current transaction hash + * @return operation result + */ + template + inline expected::Result, DbError> + forTransactionStatus(RocksDbCommon &common, std::string_view tx_hash) { + return dbCall( + common, fmtstrings::kTransactionStatus, tx_hash); + } + + /** + * Access to transactions by position + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param account name + * @param height of the block + * @param index of the 
transaction + * @return operation result + */ + template + inline expected::Result, DbError> + forTransactionByPosition(RocksDbCommon &common, + std::string_view account, + uint64_t ts, + uint64_t height, + uint64_t index) { + return dbCall( + common, fmtstrings::kTransactionByPosition, account, height, index, ts); + } + + /** + * Access to transactions by timestamp + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param account name + * @param ts is a transaction timestamp + * @return operation result + */ + template + inline expected::Result, DbError> + forTransactionByTimestamp(RocksDbCommon &common, + std::string_view account, + uint64_t ts, + uint64_t height, + uint64_t index) { + return dbCall( + common, fmtstrings::kTransactionByTs, account, ts, height, index); + } + + /** + * Access to setting file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param key setting name + * @return operation result + */ + template + inline expected::Result, DbError> forSettings( + RocksDbCommon &common, std::string_view key) { + return dbCall( + common, fmtstrings::kSetting, key); + } + + /** + * Access to peer address file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param pubkey public key of the peer + * @return operation result + */ + template + inline expected::Result, DbError> + forPeerAddress(RocksDbCommon &common, std::string_view pubkey) { + return dbCall( + common, fmtstrings::kPeerAddress, pubkey); + } + + /** + * Access to peer TLS file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param pubkey is a public key of the peer + * @return operation result + */ + template + inline expected::Result, DbError> forPeerTLS( + RocksDbCommon &common, std::string_view pubkey) { + return dbCall( + common, fmtstrings::kPeerTLS, pubkey); + } + + /** 
+ * Access to asset file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain is + * @param asset name + * @return operation result + */ + template + inline expected::Result, DbError> forAsset( + RocksDbCommon &common, std::string_view asset, std::string_view domain) { + return dbCall( + common, fmtstrings::kAsset, domain, asset); + } + + /** + * Access to top blocks height and hash + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @tparam F callback with operation result + * @param common @see RocksDbCommon + * @param func callback with the result + * @return determined by the callback + */ + template + expected::Result, DbError> forTopBlockInfo( + RocksDbCommon &common) { + return dbCall(common, fmtstrings::kTopBlock); + } + + /** + * Access to account role file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @param role id + * @return operation result + */ + template + inline expected::Result, DbError> forAccountRole( + RocksDbCommon &common, + std::string_view account, + std::string_view domain, + std::string_view role) { + return dbCall( + common, fmtstrings::kAccountRole, domain, account, role); + } + + /** + * Access to account details file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @param creator_domain id + * @param creator_account name + * @param key name of the details entry + * @return operation result + */ + template + inline expected::Result, DbError> + forAccountDetail(RocksDbCommon &common, + std::string_view account, + std::string_view domain, + std::string_view creator_id, + std::string_view key) { + return dbCall( + common, fmtstrings::kAccountDetail, domain, account, creator_id, key); + } + + /** + * Access to account signatory file + * @tparam kOp @see 
kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @param pubkey public key of the signatory + * @return operation result + */ + template + inline expected::Result, DbError> forSignatory( + RocksDbCommon &common, + std::string_view account, + std::string_view domain, + std::string_view pubkey) { + return dbCall( + common, fmtstrings::kSignatory, domain, account, pubkey); + } + + /** + * Access to domain file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @return operation result + */ + template + inline expected::Result, DbError> forDomain( + RocksDbCommon &common, std::string_view domain) { + return dbCall( + common, fmtstrings::kDomain, domain); + } + + /** + * Access to account size file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @return operation result + */ + template + inline expected::Result, DbError> forAccountAssetSize( + RocksDbCommon &common, + std::string_view account, + std::string_view domain) { + return dbCall( + common, fmtstrings::kAccountAssetSize, domain, account); + } + + /** + * Access to account assets file + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @param asset name + * @return operation result + */ + template + inline expected::Result, + DbError> + forAccountAsset(RocksDbCommon &common, + std::string_view account, + std::string_view domain, + std::string_view asset) { + return dbCall( + common, fmtstrings::kAccountAsset, domain, account, asset); + } + + /** + * Access to account grantable permissions + * @tparam kOp @see kDbOperation + * @tparam kSc @see kDbEntry + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @param grantee_domain id + * 
@param grantee_account name + * @return operation result + */ + template + inline expected::Result< + std::optional, + DbError> + forGrantablePermissions(RocksDbCommon &common, + std::string_view account, + std::string_view domain, + std::string_view grantee_account_id) { + return dbCall( + common, fmtstrings::kGranted, domain, account, grantee_account_id); + } + + /** + * Get all permissions for the account + * @param common @see RocksDbCommon + * @param domain id + * @param account name + * @return permission set for the account + */ + inline expected::Result + accountPermissions(RocksDbCommon &common, + std::string_view account, + std::string_view domain) { + assert(!domain.empty()); + assert(!account.empty()); + + /// TODO(iceseer): remove this vector(some kind of stack allocator) + /// or to store key prefix value and make another db call inside lambda + std::vector roles; + auto status = enumerateKeys(common, + [&](auto role) { + if (!role.empty()) + roles.emplace_back(role.ToStringView()); + else { + assert(!"Role can not be empty string!"); + } + return true; + }, + fmtstrings::kPathAccountRoles, + domain, + account); + + if (!status.ok()) + return makeError( + DbErrorCode::kNoAccount, + "Enumerate account {}@{} roles failed with status: {}.", + account, + domain, + status.ToString()); + + shared_model::interface::RolePermissionSet permissions; + for (auto &role : roles) { + auto opt_perm = + forRole(common, role); + RDB_ERROR_CHECK(opt_perm); + permissions |= *opt_perm.assumeValue(); + } + + return permissions; + } + + inline expected::Result checkPermissions( + shared_model::interface::RolePermissionSet const &permissions, + shared_model::interface::permissions::Role const to_check) { + if (permissions.isSet(to_check)) + return {}; + + return makeError(DbErrorCode::kErrorNoPermissions, "No permissions."); + } + + inline expected::Result checkPermissions( + std::string_view domain_id, + std::string_view creator_domain_id, + 
shared_model::interface::RolePermissionSet const &permissions, + shared_model::interface::permissions::Role const all, + shared_model::interface::permissions::Role const domain) { + if (permissions.isSet(all)) + return {}; + + if (domain_id == creator_domain_id && permissions.isSet(domain)) + return {}; + + return makeError(DbErrorCode::kErrorNoPermissions, "No permissions."); + } + + inline expected::Result checkGrantablePermissions( + shared_model::interface::RolePermissionSet const &permissions, + shared_model::interface::GrantablePermissionSet const + &grantable_permissions, + shared_model::interface::permissions::Grantable const granted) { + if (grantable_permissions.isSet(granted) + || permissions.isSet(shared_model::interface::permissions::Role::kRoot)) + return {}; + + return makeError(DbErrorCode::kErrorNoPermissions, "No permissions."); + } + + inline expected::Result checkPermissions( + shared_model::interface::RolePermissionSet const &permissions, + shared_model::interface::GrantablePermissionSet const + &grantable_permissions, + shared_model::interface::permissions::Role const role, + shared_model::interface::permissions::Grantable const granted) { + if (permissions.isSet(role)) + return {}; + + if (grantable_permissions.isSet(granted)) + return {}; + + return makeError(DbErrorCode::kErrorNoPermissions, "No permissions."); + } + + inline expected::Result checkPermissions( + std::string_view domain_id, + std::string_view creator_domain_id, + std::string_view qry_account_id, + std::string_view creator_id, + shared_model::interface::RolePermissionSet const &permissions, + shared_model::interface::permissions::Role const all, + shared_model::interface::permissions::Role const domain, + shared_model::interface::permissions::Role const my) { + if (permissions.isSet(all)) + return {}; + + if (domain_id == creator_domain_id && permissions.isSet(domain)) + return {}; + + if (qry_account_id == creator_id && permissions.isSet(my)) + return {}; + + return 
makeError(DbErrorCode::kErrorNoPermissions, "No permissions."); + } + + struct PaginationContext { + struct FirstEntry { + std::string writer_from; + std::string key_from; + }; + + std::optional first; + uint64_t page_size; + }; + + inline expected::Result aggregateAccountDetails( + RocksDbCommon &common, + std::string_view account, + std::string_view domain, + uint64_t &total, + std::string_view writer_filter = std::string_view{}, + std::string_view key_filter = std::string_view{}, + std::optional pagination = std::nullopt, + std::string *next_writer = nullptr, + std::string *next_key = nullptr) { + std::string result = "{"; + std::string prev_writer; + + auto remains = pagination ? pagination->page_size + 1ull + : std::numeric_limits::max(); + bool found = !pagination || !pagination->first; + bool have_entries = false; + + // TODO(iceseer): find first entry by log(N) + total = 0ull; + auto status = ametsuchi::enumerateKeysAndValues( + common, + [&](auto path, auto value) { + auto const &[cur_writer, _, cur_key] = + staticSplitId<3>(path.ToStringView(), fmtstrings::kDelimiter); + + have_entries = true; + if (!writer_filter.empty() && cur_writer != writer_filter) + return true; + if (!key_filter.empty() && cur_key != key_filter) + return true; + + ++total; + if (!found) { + if (cur_writer != pagination->first->writer_from + || cur_key != pagination->first->key_from) + return true; + found = true; + } + + if (remains == 0ull) { + return true; + } else if (remains-- == 1ull) { + if (next_writer != nullptr) + *next_writer = cur_writer; + if (next_key != nullptr) + *next_key = cur_key; + return true; + } + + if (prev_writer != cur_writer) { + if (prev_writer.empty()) + result += '\"'; + else + result += "},\""; + result += cur_writer; + result += "\": {"; + prev_writer = cur_writer; + } else + result += ", "; + + result += '\"'; + result += cur_key; + result += "\": \""; + result += value.ToStringView(); + result += '\"'; + + return true; + }, + 
fmtstrings::kPathAccountDetail, + domain, + account); + RDB_ERROR_CHECK(canExist(status, [&]() { + return fmt::format("Aggregate account {}@{} data", account, domain); + })); + + if (!found && have_entries) + return makeError(DbErrorCode::kInvalidPagination, + "Invalid pagination."); + + result += result.size() == 1ull ? "}" : "}}"; + return result; + } + + inline expected::Result dropWSV(RocksDbCommon &common) { + if (auto status = common.filterDelete(fmtstrings::kPathWsv); !status.ok()) + return makeError(DbErrorCode::kOperationFailed, + "Clear WSV failed."); + return {}; + } + +} // namespace iroha::ametsuchi + +#endif diff --git a/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp b/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp new file mode 100644 index 00000000000..19c40ed230a --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp @@ -0,0 +1,71 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_DB_TRANSACTION_HPP +#define IROHA_ROCKSDB_DB_TRANSACTION_HPP + +#include "ametsuchi/impl/db_transaction.hpp" + +#include "ametsuchi/impl/rocksdb_common.hpp" + +namespace iroha::ametsuchi { + + class RocksDbTransaction final : public DatabaseTransaction { + public: + RocksDbTransaction(RocksDbTransaction const &) = delete; + RocksDbTransaction(RocksDbTransaction &&) = delete; + + RocksDbTransaction &operator=(RocksDbTransaction const &) = delete; + RocksDbTransaction &operator=(RocksDbTransaction &&) = delete; + + RocksDbTransaction(std::shared_ptr tx_context) + : tx_context_(std::move(tx_context)) { + assert(tx_context_); + } + + void begin() override {} + + void savepoint(std::string const &) override { + RocksDbCommon common(tx_context_); + common.savepoint(); + } + + void releaseSavepoint(std::string const &) override { + RocksDbCommon common(tx_context_); + common.release(); + } + + void commit() override { + RocksDbCommon common(tx_context_); + common.commit(); + } + + 
void rollback() override { + RocksDbCommon common(tx_context_); + common.rollback(); + } + + void prepare(std::string const &) override { + RocksDbCommon common(tx_context_); + common.prepare(); + } + + void commitPrepared(std::string const &) override { + RocksDbCommon common(tx_context_); + common.commit(); + } + + void rollbackToSavepoint(std::string const &) override { + RocksDbCommon common(tx_context_); + common.rollbackToSavepoint(); + } + + private: + std::shared_ptr tx_context_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_DB_TRANSACTION_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_indexer.cpp b/irohad/ametsuchi/impl/rocksdb_indexer.cpp new file mode 100644 index 00000000000..a99e10d96ef --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_indexer.cpp @@ -0,0 +1,101 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_indexer.hpp" + +#include + +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "common/to_lower.hpp" +#include "cryptography/hash.hpp" + +using namespace iroha::ametsuchi; +using namespace shared_model::interface::types; + +RocksDBIndexer::RocksDBIndexer(std::shared_ptr db_context) + : db_context_(std::move(db_context)) {} + +void RocksDBIndexer::txHashStatus(const TxPosition &position, + TimestampType const ts, + const HashType &tx_hash, + bool is_committed) { + RocksDbCommon common(db_context_); + common.valueBuffer() = is_committed ? 
"TRUE" : "FALSE"; + common.valueBuffer() += '#'; + common.valueBuffer() += std::to_string(position.height); + common.valueBuffer() += '#'; + common.valueBuffer() += std::to_string(position.index); + common.valueBuffer() += '#'; + common.valueBuffer() += std::to_string(ts); + + std::string h_hex; + forTransactionStatus(common, + toLowerAppend(tx_hash.hex(), h_hex)); + + if (is_committed) { + uint64_t txs_count = 0ull; + if (auto result = + forTxsTotalCount(common); + expected::hasValue(result) && result.assumeValue()) + txs_count = *result.assumeValue(); + + common.encode(txs_count + 1ull); + forTxsTotalCount(common); + } +} + +void RocksDBIndexer::committedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const HashType &committed_tx_hash) { + txHashStatus(position, ts, committed_tx_hash, true); +} + +void RocksDBIndexer::rejectedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const HashType &rejected_tx_hash) { + txHashStatus(position, ts, rejected_tx_hash, false); +} + +void RocksDBIndexer::txPositions( + shared_model::interface::types::AccountIdType const &account, + HashType const &hash, + boost::optional &&asset_id, + TimestampType const ts, + TxPosition const &position) { + RocksDbCommon common(db_context_); + + if (auto res = forTransactionByPosition( + common, account, ts, position.height, position.index); + expected::hasError(res)) + return; + + std::string h_hex; + common.valueBuffer().assign(fmt::format( + "{}%{}", asset_id ? 
*asset_id : "", toLowerAppend(hash.hex(), h_hex))); + + forTransactionByPosition( + common, account, ts, position.height, position.index); + forTransactionByTimestamp( + common, account, ts, position.height, position.index); + + uint64_t txs_count = 0ull; + if (auto result = forTxsTotalCount( + common, account); + expected::hasValue(result) && result.assumeValue()) + txs_count = *result.assumeValue(); + + common.encode(txs_count + 1ull); + forTxsTotalCount(common, account); +} + +iroha::expected::Result RocksDBIndexer::flush() { + RocksDbCommon common(db_context_); + if (!common.commit().ok()) + return expected::makeError("Unable to flush transactions data."); + return {}; +} diff --git a/irohad/ametsuchi/impl/rocksdb_indexer.hpp b/irohad/ametsuchi/impl/rocksdb_indexer.hpp new file mode 100644 index 00000000000..f5d913b8a99 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_indexer.hpp @@ -0,0 +1,54 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef AMETSUCHI_ROCKSDB_INDEXER_HPP +#define AMETSUCHI_ROCKSDB_INDEXER_HPP + +#include "ametsuchi/indexer.hpp" + +#include +#include + +namespace iroha::ametsuchi { + + struct RocksDBPort; + struct RocksDBContext; + + class RocksDBIndexer final : public Indexer { + public: + RocksDBIndexer(std::shared_ptr db_context); + + void committedTxHash(const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const shared_model::interface::types::HashType + &committed_tx_hash) override; + + void rejectedTxHash(const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const shared_model::interface::types::HashType + &rejected_tx_hash) override; + + void txPositions( + shared_model::interface::types::AccountIdType const &account, + shared_model::interface::types::HashType const &hash, + boost::optional &&asset_id, + shared_model::interface::types::TimestampType const ts, + TxPosition const &position) override; + 
+ iroha::expected::Result flush() override; + + private: + std::shared_ptr db_context_; + + /// Index tx status by its hash. + void txHashStatus(const TxPosition &position, + shared_model::interface::types::TimestampType const ts, + const shared_model::interface::types::HashType &tx_hash, + bool is_committed); + }; + +} // namespace iroha::ametsuchi + +#endif // AMETSUCHI_ROCKSDB_INDEXER_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_options.hpp b/irohad/ametsuchi/impl/rocksdb_options.hpp new file mode 100644 index 00000000000..0a49660453c --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_options.hpp @@ -0,0 +1,28 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_OPTIONS_HPP +#define IROHA_ROCKSDB_OPTIONS_HPP + +namespace iroha::ametsuchi { + + /** + * Type for convenient formatting of RocksDB. + */ + class RocksDbOptions final { + const std::string db_path_; + + public: + explicit RocksDbOptions(std::string_view db_path) : db_path_(db_path) {} + + public: + const std::string &dbPath() const { + return db_path_; + } + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_OPTIONS_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_query_executor.cpp b/irohad/ametsuchi/impl/rocksdb_query_executor.cpp new file mode 100644 index 00000000000..e7e0e502d38 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_query_executor.cpp @@ -0,0 +1,59 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_query_executor.hpp" + +#include +#include +#include "ametsuchi/impl/rocksdb_specific_query_executor.hpp" +#include "interfaces/iroha_internal/query_response_factory.hpp" +#include "interfaces/queries/blocks_query.hpp" +#include "interfaces/queries/query.hpp" +#include "logger/logger.hpp" + +using namespace shared_model::interface::permissions; + +namespace iroha::ametsuchi { + + RocksDbQueryExecutor::RocksDbQueryExecutor( + std::shared_ptr + response_factory, + std::shared_ptr specific_query_executor, + logger::LoggerPtr log) + : QueryExecutorBase(std::move(response_factory), + specific_query_executor, + std::move(log)), + tx_context_(specific_query_executor->getTxContext()) {} + + bool RocksDbQueryExecutor::validateSignatures( + const shared_model::interface::Query &query) { + return validateSignaturesImpl(query); + } + + bool RocksDbQueryExecutor::validateSignatures( + const shared_model::interface::BlocksQuery &query) { + return validateSignaturesImpl(query); + } + + template + bool RocksDbQueryExecutor::validateSignaturesImpl(const Q &query) { + auto const &[account, domain] = staticSplitId<2>(query.creatorAccountId()); + RocksDbCommon common(tx_context_); + + for (auto &signatory : query.signatures()) + if (auto result = + forSignatory( + common, account, domain, signatory.publicKey()); + expected::hasError(result)) { + log_->error("code:{}, description:{}", + result.assumeError().code, + result.assumeError().description); + return false; + } + + return true; + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/rocksdb_query_executor.hpp b/irohad/ametsuchi/impl/rocksdb_query_executor.hpp new file mode 100644 index 00000000000..ebb0c9fe420 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_query_executor.hpp @@ -0,0 +1,45 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_QUERY_EXECUTOR_HPP +#define IROHA_ROCKSDB_QUERY_EXECUTOR_HPP + +#include "ametsuchi/impl/query_executor_base.hpp" + +#include "logger/logger_fwd.hpp" + +namespace shared_model { + namespace interface { + class QueryResponseFactory; + } // namespace interface +} // namespace shared_model + +namespace iroha::ametsuchi { + + class RocksDbSpecificQueryExecutor; + struct RocksDBContext; + + class RocksDbQueryExecutor : public QueryExecutorBase { + public: + RocksDbQueryExecutor( + std::shared_ptr + response_factory, + std::shared_ptr specific_query_executor, + logger::LoggerPtr log); + + bool validateSignatures( + const shared_model::interface::Query &query) override; + bool validateSignatures( + const shared_model::interface::BlocksQuery &query) override; + + private: + template + bool validateSignaturesImpl(const Q &query); + std::shared_ptr tx_context_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_QUERY_EXECUTOR_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_settings_query.cpp b/irohad/ametsuchi/impl/rocksdb_settings_query.cpp new file mode 100644 index 00000000000..78480de2a95 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_settings_query.cpp @@ -0,0 +1,74 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_settings_query.hpp" + +#include +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "interfaces/common_objects/types.hpp" +#include "logger/logger.hpp" + +using namespace iroha; +using namespace iroha::ametsuchi; + +namespace { + + expected::Result getValueFromDb( + std::shared_ptr db_context, + const shared_model::interface::types::SettingKeyType &key, + uint64_t &destination) { + RocksDbCommon common(db_context); + auto status = common.get(fmtstrings::kSetting, kMaxDescriptionSizeKey); + + if (auto result = iroha::ametsuchi::canExist( + status, [&] { return fmt::format("Max description size key"); }); + expected::hasError(result)) + return expected::makeError(result.assumeError().description); + + if (status.ok()) { + common.decode(destination); + return true; + } + + return false; + } + +} // namespace + +namespace iroha::ametsuchi { + + RocksDbSettingQuery::RocksDbSettingQuery( + std::shared_ptr db_context, logger::LoggerPtr log) + : db_context_(std::move(db_context)), log_(std::move(log)) {} + + iroha::expected::Result< + std::unique_ptr, + std::string> + RocksDbSettingQuery::get() { + return update(shared_model::validation::getDefaultSettings()); + } + + iroha::expected::Result< + std::unique_ptr, + std::string> + RocksDbSettingQuery::update( + std::unique_ptr base) { + uint64_t value; + if (auto res = getValueFromDb(db_context_, kMaxDescriptionSizeKey, value); + expected::hasError(res)) + return expected::makeError(res.assumeError()); + else if (res.assumeValue()) { + base->max_description_size = static_cast(value); + log_->info("Updated value for " + kMaxDescriptionSizeKey + ": {}", + base->max_description_size); + } else { + log_->info("Kept value for " + kMaxDescriptionSizeKey + ": {}", + base->max_description_size); + } + + return base; + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/rocksdb_settings_query.hpp 
b/irohad/ametsuchi/impl/rocksdb_settings_query.hpp new file mode 100644 index 00000000000..a6eda0af1f7 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_settings_query.hpp @@ -0,0 +1,40 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_SETTING_QUERY_HPP +#define IROHA_ROCKSDB_SETTING_QUERY_HPP + +#include "ametsuchi/setting_query.hpp" + +#include "logger/logger_fwd.hpp" + +namespace iroha::ametsuchi { + + struct RocksDBContext; + + /** + * Class which implements SettingQuery with a RocksDB backend. + */ + class RocksDbSettingQuery : public SettingQuery { + public: + RocksDbSettingQuery(std::shared_ptr db_context, + logger::LoggerPtr log); + + expected::Result, + std::string> + get() override; + + private: + expected::Result, + std::string> + update(std::unique_ptr base); + + std::shared_ptr db_context_; + logger::LoggerPtr log_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_SETTING_QUERY_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp new file mode 100644 index 00000000000..14d2b7148f6 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp @@ -0,0 +1,981 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_specific_query_executor.hpp" + +#include +#include +#include +#include +#include "ametsuchi/block_storage.hpp" +#include "ametsuchi/impl/executor_common.hpp" +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "backend/plain/account_detail_record_id.hpp" +#include "backend/plain/peer.hpp" +#include "common/bind.hpp" +#include "common/common.hpp" +#include "common/to_lower.hpp" +#include "interfaces/common_objects/amount.hpp" +#include "interfaces/queries/asset_pagination_meta.hpp" +#include "interfaces/queries/get_account.hpp" +#include "interfaces/queries/get_account_asset_transactions.hpp" +#include "interfaces/queries/get_account_assets.hpp" +#include "interfaces/queries/get_account_detail.hpp" +#include "interfaces/queries/get_account_transactions.hpp" +#include "interfaces/queries/get_asset_info.hpp" +#include "interfaces/queries/get_block.hpp" +#include "interfaces/queries/get_engine_receipts.hpp" +#include "interfaces/queries/get_peers.hpp" +#include "interfaces/queries/get_pending_transactions.hpp" +#include "interfaces/queries/get_role_permissions.hpp" +#include "interfaces/queries/get_roles.hpp" +#include "interfaces/queries/get_signatories.hpp" +#include "interfaces/queries/get_transactions.hpp" +#include "interfaces/queries/query.hpp" +#include "interfaces/queries/tx_pagination_meta.hpp" +#include "interfaces/transaction.hpp" +#include "pending_txs_storage/pending_txs_storage.hpp" + +using namespace iroha; +using namespace iroha::ametsuchi; + +namespace { + struct PaginationBounds { + using HeightType = shared_model::interface::types::HeightType; + using TimestampType = shared_model::interface::types::TimestampType; + + HeightType heightFrom; + HeightType heightTo; + + TimestampType tsFrom; + TimestampType tsTo; + }; +} // namespace + +using ErrorQueryType = + shared_model::interface::QueryResponseFactory::ErrorQueryType; + +using 
shared_model::interface::permissions::Role; + +using shared_model::interface::RolePermissionSet; + +RocksDbSpecificQueryExecutor::RocksDbSpecificQueryExecutor( + std::shared_ptr db_context, + BlockStorage &block_store, + std::shared_ptr pending_txs_storage, + std::shared_ptr + response_factory, + std::shared_ptr perm_converter) + : db_context_(std::move(db_context)), + block_store_(block_store), + pending_txs_storage_(std::move(pending_txs_storage)), + query_response_factory_{std::move(response_factory)}, + perm_converter_(std::move(perm_converter)) { + assert(db_context_); +} + +std::shared_ptr RocksDbSpecificQueryExecutor::getTxContext() { + return db_context_; +} + +QueryExecutorResult RocksDbSpecificQueryExecutor::execute( + const shared_model::interface::Query &qry) { + return boost::apply_visitor( + [this, &qry](const auto &query) { + auto &query_hash = qry.hash(); + try { + RocksDbCommon common(db_context_); + auto const &[account_name, domain_id] = + staticSplitId<2ull>(qry.creatorAccountId()); + + // get account permissions + if (auto perm_result = + accountPermissions(common, account_name, domain_id); + expected::hasError(perm_result)) + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kStatefulFailed, + fmt::format("Query: {}, message: {}", + query.toString(), + perm_result.assumeError().description), + perm_result.assumeError().code, + query_hash); + else if (auto result = (*this)(common, + query, + qry.creatorAccountId(), + query_hash, + perm_result.assumeValue()); + expected::hasError(result)) + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kStatefulFailed, + fmt::format("Query: {}, message: {}", + query.toString(), + result.assumeError().description), + result.assumeError().code, + query_hash); + else + return std::move(result.assumeValue()); + } catch (std::exception &e) { + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kStatefulFailed, + fmt::format("Query: {}, 
message: {}", query.toString(), e.what()), + 1001, + query_hash); + } + }, + qry.get()); +} + +bool RocksDbSpecificQueryExecutor::hasAccountRolePermission( + shared_model::interface::permissions::Role permission, + const std::string &account_id) const { + RocksDbCommon common(db_context_); + + auto const &[account_name, domain_id] = staticSplitId<2ull>(account_id); + if (auto account_permissions = + accountPermissions(common, account_name, domain_id); + expected::hasValue(account_permissions)) + return account_permissions.assumeValue().isSet(permission); + + return false; +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccount &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2ull>(creator_id); + auto const &[account_name, domain_id] = + staticSplitId<2ull>(query.accountId()); + + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + query.accountId(), + creator_id, + creator_permissions, + Role::kGetAllAccounts, + Role::kGetDomainAccounts, + Role::kGetMyAccount)); + + uint64_t quorum; + if (auto result = forQuorum( + common, account_name, domain_id); + expected::hasError(result)) + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kNoAccount, + fmt::format("Query: {}, message: {}", + query.toString(), + result.assumeError().description), + result.assumeError().code, + query_hash); + else + quorum = *result.assumeValue(); + + uint64_t total; + RDB_TRY_GET_VALUE( + details, aggregateAccountDetails(common, account_name, domain_id, total)); + + std::vector roles; + auto status = + ametsuchi::enumerateKeys(common, + [&](auto role) { + roles.emplace_back(role.ToStringView()); + return true; + 
}, + fmtstrings::kPathAccountRoles, + domain_id, + account_name); + RDB_ERROR_CHECK(canExist(status, [&]() { + return fmt::format("Enumerate roles for account {}", query.accountId()); + })); + + return query_response_factory_->createAccountResponse( + query.accountId(), + shared_model::interface::types::DomainIdType(domain_id), + quorum, + details, + roles, + query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetBlock &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kGetBlocks)); + + auto const ledger_height = block_store_.size(); + if (query.height() > ledger_height) + return makeError( + ErrorCodes::kQueryHeightOverflow, + "requested height ({}) is greater than the ledger's one ({})", + std::to_string(query.height()), + std::to_string(ledger_height)); + + auto block_deserialization_msg = [height = query.height()] { + return "could not retrieve block with given height: " + + std::to_string(height); + }; + auto block = block_store_.fetch(query.height()); + if (!block) + return makeError(ErrorCodes::kFetchBlockFailed, + "Block deserialization error: {}", + block_deserialization_msg()); + + return query_response_factory_->createBlockResponse(std::move(*block), + query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetSignatories &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + 
staticSplitId<2ull>(creator_id); + auto const &[account_name, domain_id] = + staticSplitId<2ull>(query.accountId()); + + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + query.accountId(), + creator_id, + creator_permissions, + Role::kGetAllSignatories, + Role::kGetDomainSignatories, + Role::kGetMySignatories)); + + std::vector signatories; + auto const status = + enumerateKeys(common, + [&](auto const &signatory) { + signatories.emplace_back(signatory.ToStringView()); + return true; + }, + fmtstrings::kPathSignatories, + domain_id, + account_name); + RDB_ERROR_CHECK(canExist(status, [&]() { + return fmt::format("Enumerate signatories for account {}", + query.accountId()); + })); + + if (signatories.empty()) + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kNoSignatories, + fmt::format("{}, status: not found", query.toString()), + 0, + query_hash); + + return query_response_factory_->createSignatoriesResponse(signatories, + query_hash); +} + +struct TxPosition { + uint64_t ts; + uint64_t height; + uint64_t index; +}; + +inline void decodePosition(std::string_view ts, + std::string_view height, + std::string_view index, + TxPosition &out) { + std::from_chars(ts.data(), ts.data() + ts.size(), out.ts); + std::from_chars(height.data(), height.data() + height.size(), out.height); + std::from_chars(index.data(), index.data() + index.size(), out.index); +} + +template +iroha::expected::Result +RocksDbSpecificQueryExecutor::getTransactionsFromBlock(uint64_t block_id, + uint64_t tx_index, + Pred &&pred, + OutputIterator dest_it) { + auto opt_block = block_store_.fetch(block_id); + if (not opt_block) { + return iroha::expected::makeError( + fmt::format("Failed to retrieve block with id {}", block_id)); + } + auto &block = opt_block.value(); + + const auto block_size = block->transactions().size(); + auto const tx_id = tx_index; + if (tx_id >= block_size) + return iroha::expected::makeError( + fmt::format("Failed to retrieve 
transaction with id {} " + "from block height {}.", + tx_id, + block_id)); + + auto &tx = block->transactions()[tx_id]; + if (pred(tx)) { + *dest_it++ = tx.moveTo(); + } + + return {}; +} + +template +RocksDbSpecificQueryExecutor::ExecutionResult +RocksDbSpecificQueryExecutor::readTxs( + RocksDbCommon &common, + std::shared_ptr + &query_response_factory, + const Qry &query, + const shared_model::interface::types::HashType &query_hash) { + auto &ordering = query.paginationMeta().ordering(); + shared_model::interface::Ordering::OrderingEntry const *ordering_ptr = + nullptr; + size_t count = 0ull; + ordering.get(ordering_ptr, count); + assert(count > 0ull); + + RDB_TRY_GET_VALUE(opt_txs_total, + forTxsTotalCount( + common, query.accountId())); + + std::vector> + response_txs; + uint64_t remains = query.paginationMeta().pageSize() + 1ull; + std::optional next_page; + + static_assert( + std::is_same_v< + typename decltype(query.paginationMeta().firstTxTime())::value_type, + typename decltype(query.paginationMeta().lastTxTime())::value_type>, + "Type of firstTxTime and lastTxTime must be the same!"); + static_assert( + std::is_same_v, + "Type of firstTxTime and lastTxTime must be the same!"); + + static_assert( + std::is_same_v< + typename decltype(query.paginationMeta().firstTxHeight())::value_type, + typename decltype(query.paginationMeta().lastTxHeight())::value_type>, + "Height types must be the same!"); + static_assert( + std::is_same_v, + "Height types must be the same!"); + + PaginationBounds const bounds{ + query.paginationMeta().firstTxHeight().value_or( + shared_model::interface::types::HeightType(1ull)), + query.paginationMeta().lastTxHeight().value_or( + std::numeric_limits::max()), + query.paginationMeta().firstTxTime().value_or( + std::numeric_limits::min()), + query.paginationMeta().lastTxTime().value_or( + std::numeric_limits::max())}; + + auto parser = [&](auto p, auto d) { + auto const &[asset, tx_hash] = staticSplitId<2ull>(d.ToStringView(), "%"); + if 
(readTxsWithAssets) + if (asset.empty()) + return true; + + auto const position = + staticSplitId<5ull>(p.ToStringView(), fmtstrings::kDelimiter); + + TxPosition tx_position = {0ull, 0ull, 0ull}; + if (ordering_ptr->field + == shared_model::interface::Ordering::Field::kCreatedTime) + decodePosition( + position.at(0), position.at(2), position.at(4), tx_position); + else + decodePosition( + position.at(4), position.at(0), position.at(2), tx_position); + + static_assert( + std::is_unsigned_v && std::is_unsigned_v, + "Height must be unsigned"); + if ((tx_position.height - bounds.heightFrom) + > (bounds.heightTo - bounds.heightFrom)) + return true; + + static_assert( + std::is_unsigned_v && std::is_unsigned_v, + "TS must be unsigned"); + if ((tx_position.ts - bounds.tsFrom) > (bounds.tsTo - bounds.tsFrom)) + return true; + + // get transactions corresponding to indexes + if (remains-- > 1ull) { + auto txs_result = + getTransactionsFromBlock(tx_position.height, + tx_position.index, + [](auto &) { return true; }, + std::back_inserter(response_txs)); + if (auto e = iroha::expected::resultToOptionalError(txs_result)) + return true; + + return true; + } else { + next_page = shared_model::crypto::Hash(tx_hash); + return false; + } + }; + + rocksdb::Status status = rocksdb::Status::OK(); + if (query.paginationMeta().firstTxHash()) { + std::string target_hash; + if (auto result = + forTransactionStatus( + common, + toLowerAppend(query.paginationMeta().firstTxHash()->toString(), + target_hash)); + expected::hasValue(result)) { + assert(ordering_ptr->field + == shared_model::interface::Ordering::Field::kCreatedTime + || ordering_ptr->field + == shared_model::interface::Ordering::Field::kPosition); + + auto const &[tx_status, tx_height, tx_index, tx_ts] = + staticSplitId<4ull>(*result.template assumeValue(), "#"); + + if (ordering_ptr->field + == shared_model::interface::Ordering::Field::kCreatedTime) { + auto it = common.template seek(fmtstrings::kTransactionByTs, + 
query.accountId(), + tx_ts, + tx_height, + tx_index); + status = enumerateKeysAndValues(common, + parser, + it, + fmtstrings::kPathTransactionByTs, + query.accountId()); + } else { + auto it = common.template seek(fmtstrings::kTransactionByPosition, + query.accountId(), + tx_height, + tx_index, + tx_ts); + status = enumerateKeysAndValues(common, + parser, + it, + fmtstrings::kPathTransactionByPosition, + query.accountId()); + } + } + } else { + if (ordering_ptr->field + == shared_model::interface::Ordering::Field::kCreatedTime) { + auto it = common.template seek(fmtstrings::kTransactionByTsLowerBound, + query.accountId(), + bounds.tsFrom); + status = enumerateKeysAndValues(common, + parser, + it, + fmtstrings::kPathTransactionByTs, + query.accountId()); + } else { + auto it = common.template seek(fmtstrings::kTransactionByHeight, + query.accountId(), + bounds.heightFrom); + status = enumerateKeysAndValues(common, + parser, + it, + fmtstrings::kPathTransactionByPosition, + query.accountId()); + } + } + + RDB_ERROR_CHECK(canExist(status, [&]() { + return fmt::format("Enumerate transactions for account {}", + query.accountId()); + })); + + return query_response_factory->createTransactionsPageResponse( + std::move(response_txs), + next_page, + opt_txs_total ? 
*opt_txs_total : 0ull, + query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccountTransactions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2ull>(creator_id); + auto const &[account_name, domain_id] = + staticSplitId<2ull>(query.accountId()); + + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + query.accountId(), + creator_id, + creator_permissions, + Role::kGetAllAccTxs, + Role::kGetDomainAccTxs, + Role::kGetMyAccTxs)); + + return readTxs(common, query_response_factory_, query, query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetTransactions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2ull>(creator_id); + RDB_ERROR_CHECK(checkPermissions(creator_domain_id, + creator_domain_id, + creator_permissions, + Role::kGetAllTxs, + Role::kGetMyTxs)); + + std::string h_hex; + std::vector> + response_txs; + + bool const canRequestAll = creator_permissions.isSet(Role::kGetAllTxs); + for (auto const &hash : query.transactionHashes()) { + h_hex.clear(); + toLowerAppend(hash.hex(), h_hex); + + std::optional opt; + if (auto r = forTransactionStatus( + common, h_hex); + expected::hasError(r)) + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kStatefulFailed, + fmt::format("Query: {}, message: {}", + query.toString(), + 
r.assumeError().description), + ErrorCodes::kNoTransaction, + query_hash); + else + opt = std::move(r.assumeValue()); + + auto const &[tx_status, tx_height, tx_index, tx_ts] = + staticSplitId<4ull>(*opt, "#"); + + TxPosition tx_position = {0ull, 0ull, 0ull}; + decodePosition(tx_ts, tx_height, tx_index, tx_position); + + if (auto r = + forTransactionByPosition( + common, + creator_id, + tx_position.ts, + tx_position.height, + tx_position.index); + !canRequestAll + && (expected::hasError(r) + || staticSplitId<2ull>(*r.assumeValue(), "%").at(1) != h_hex)) + continue; + + auto txs_result = + getTransactionsFromBlock(tx_position.height, + tx_position.index, + [](auto &) { return true; }, + std::back_inserter(response_txs)); + if (auto e = iroha::expected::resultToOptionalError(txs_result)) + return makeError( + ErrorCodes::kRetrieveTransactionsFailed, + "Retrieve txs failed: {}", + e.value()); + } + + return query_response_factory_->createTransactionsResponse( + std::move(response_txs), query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccountAssetTransactions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2ull>(creator_id); + auto const &[account_name, domain_id] = + staticSplitId<2ull>(query.accountId()); + + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + query.accountId(), + creator_id, + creator_permissions, + Role::kGetAllAccAstTxs, + Role::kGetDomainAccAstTxs, + Role::kGetMyAccAstTxs)); + + return readTxs(common, query_response_factory_, query, query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const 
shared_model::interface::GetAccountAssets &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2ull>(creator_id); + auto const &[account_name, domain_id] = + staticSplitId<2ull>(query.accountId()); + + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + query.accountId(), + creator_id, + creator_permissions, + Role::kGetAllAccAst, + Role::kGetDomainAccAst, + Role::kGetMyAccAst)); + + RDB_TRY_GET_VALUE( + opt_acc_asset_size, + forAccountAssetSize( + common, account_name, domain_id)); + + uint64_t account_asset_size = opt_acc_asset_size ? *opt_acc_asset_size : 0ull; + + const auto pagination_meta{query.paginationMeta()}; + const auto req_first_asset_id = + pagination_meta | [](auto const &pagination_meta) { + return pagination_meta.get().firstAssetId(); + }; + const auto req_page_size = pagination_meta | [](const auto &pagination_meta) { + return std::optional(pagination_meta.get().pageSize()); + }; + + std::vector> + assets; + std::optional next_asset_id; + + bool first_found = !req_first_asset_id; + uint64_t remains = req_page_size ? 
*req_page_size + 1ull + : std::numeric_limits::max(); + auto status = enumerateKeysAndValues( + common, + [&](auto asset, auto value) { + if (!first_found) { + if (asset.ToStringView() != *req_first_asset_id) + return true; + first_found = true; + } + + if (remains-- > 1ull) { + assets.emplace_back( + query.accountId(), + asset.ToStringView(), + shared_model::interface::Amount(value.ToStringView())); + return true; + } else { + next_asset_id = asset.ToStringView(); + return false; + } + }, + fmtstrings::kPathAccountAssets, + domain_id, + account_name); + RDB_ERROR_CHECK(canExist(status, [&] { + return fmt::format("Account {} assets", query.accountId()); + })); + + if (assets.empty() and req_first_asset_id) + return makeError( + ErrorCodes::kAssetNotFound, "Asset {} not found", *req_first_asset_id); + + return query_response_factory_->createAccountAssetResponse( + assets, account_asset_size, next_asset_id, query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccountDetail &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + auto const &[creator_account_name, creator_domain_id] = + staticSplitId<2ull>(creator_id); + auto const &[account_name, domain_id] = + staticSplitId<2ull>(query.accountId()); + + if (auto r = forAccount( + common, account_name, domain_id); + expected::hasError(r)) + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kNoAccountDetail, + fmt::format("Query: {}, message: {}", + query.toString(), + r.assumeError().description), + r.assumeError().code, + query_hash); + + RDB_ERROR_CHECK(checkPermissions(domain_id, + creator_domain_id, + query.accountId(), + creator_id, + creator_permissions, + Role::kGetAllAccDetail, + Role::kGetDomainAccDetail, + 
Role::kGetMyAccDetail)); + + auto writer = query.writer(); + auto key = query.key(); + auto pagination = query.paginationMeta(); + + std::optional p; + if (pagination) { + std::optional fe; + if (pagination->get().firstRecordId()) + fe = PaginationContext::FirstEntry{ + pagination->get().firstRecordId()->get().writer(), + pagination->get().firstRecordId()->get().key()}; + + p = PaginationContext{std::move(fe), pagination->get().pageSize()}; + } + + std::string next_writer, next_key; + uint64_t total; + RDB_TRY_GET_VALUE(json, + aggregateAccountDetails( + common, + account_name, + domain_id, + total, + writer ? std::string_view{*writer} : std::string_view{}, + key ? std::string_view{*key} : std::string_view{}, + std::move(p), + &next_writer, + &next_key)); + + std::optional next; + if (!next_writer.empty() || !next_key.empty()) + next = shared_model::plain::AccountDetailRecordId(std::move(next_writer), + std::move(next_key)); + + RDB_TRY_GET_VALUE( + opt_acc_details_count, + forAccountDetailsCount( + common, account_name, domain_id)); + return query_response_factory_->createAccountDetailResponse( + json, total, next, query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetRoles &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kGetRoles)); + + std::vector roles; + auto status = enumerateKeys(common, + [&](auto const &role) { + if (!role.empty()) + roles.emplace_back(role.ToStringView()); + else { + assert(!"Role can not be empty string!"); + } + return true; + }, + fmtstrings::kPathRoles); + RDB_ERROR_CHECK(canExist(status, [&] { return "Enumerate roles"; })); + + return query_response_factory_->createRolesResponse(std::move(roles), + 
query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetRolePermissions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kGetRoles)); + auto &role_id = query.roleId(); + + RDB_TRY_GET_VALUE( + opt_permissions, + forRole(common, role_id)); + + return query_response_factory_->createRolePermissionsResponse( + *opt_permissions, query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetAssetInfo &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kReadAssets)); + auto const &[asset_name, domain_id] = staticSplitId<2ull>(query.assetId()); + + if (auto result = forAsset( + common, asset_name, domain_id); + expected::hasError(result)) + return query_response_factory_->createErrorQueryResponse( + ErrorQueryType::kNoAsset, + fmt::format("Query: {}, message: {}", + query.toString(), + result.assumeError().description), + result.assumeError().code, + query_hash); + else + return query_response_factory_->createAssetResponse( + query.assetId(), + std::string{domain_id}, + static_cast( + *result.assumeValue()), + query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetPendingTransactions &q, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType 
&query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + std::vector> + response_txs; + if (q.paginationMeta()) { + return pending_txs_storage_ + ->getPendingTransactions(creator_id, + q.paginationMeta()->get().pageSize(), + q.paginationMeta()->get().firstTxHash()) + .match( + [this, &response_txs, &query_hash](auto &&response) { + auto &interface_txs = response.value.transactions; + response_txs.reserve(interface_txs.size()); + // TODO igor-egorov 2019-06-06 IR-555 avoid use of clone() + std::transform(interface_txs.begin(), + interface_txs.end(), + std::back_inserter(response_txs), + [](auto &tx) { return clone(*tx); }); + return query_response_factory_ + ->createPendingTransactionsPageResponse( + std::move(response_txs), + response.value.all_transactions_size, + std::move(response.value.next_batch_info), + query_hash); + }, + [this, &q, &query_hash](auto &&error) { + switch (error.error) { + case iroha::PendingTransactionStorage::ErrorCode::kNotFound: + return query_response_factory_->createErrorQueryResponse( + shared_model::interface::QueryResponseFactory:: + ErrorQueryType::kStatefulFailed, + std::string("The batch with specified first " + "transaction hash not found, the hash: ") + + q.paginationMeta()->get().firstTxHash()->toString(), + 4, // missing first tx hash error + query_hash); + default: + BOOST_ASSERT_MSG(false, + "Unknown and unhandled type of error " + "happend in pending txs storage"); + return query_response_factory_->createErrorQueryResponse( + shared_model::interface::QueryResponseFactory:: + ErrorQueryType::kStatefulFailed, + std::string("Unknown type of error happened: ") + + std::to_string(error.error), + 1, // unknown internal error + query_hash); + } + }); + } else { // TODO 2019-06-06 igor-egorov IR-516 remove deprecated + // interface + auto interface_txs = + pending_txs_storage_->getPendingTransactions(creator_id); + response_txs.reserve(interface_txs.size()); + + std::transform(interface_txs.begin(), 
+ interface_txs.end(), + std::back_inserter(response_txs), + [](auto &tx) { return clone(*tx); }); + return query_response_factory_->createTransactionsResponse( + std::move(response_txs), query_hash); + } +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetPeers &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + RDB_ERROR_CHECK(checkPermissions(creator_permissions, Role::kGetPeers)); + std::vector> peers; + + auto status = enumerateKeysAndValues( + common, + [&](auto pubkey, auto address) { + peers.emplace_back(std::make_shared( + address.ToStringView(), + std::string{pubkey.ToStringView()}, + std::nullopt)); + return true; + }, + fmtstrings::kPathPeers); + RDB_ERROR_CHECK( + canExist(status, [&]() { return fmt::format("Enumerate peers"); })); + + for (auto &peer : peers) { + RDB_TRY_GET_VALUE(opt_tls, + forPeerTLS( + common, peer->pubkey())); + + if (opt_tls) + utils::reinterpret_pointer_cast(peer) + ->setTlsCertificate( + shared_model::interface::types::TLSCertificateType{*opt_tls}); + } + + return query_response_factory_->createPeersResponse( + std::vector>(peers.begin(), + peers.end()), + query_hash); +} + +RocksDbSpecificQueryExecutor::ExecutionResult RocksDbSpecificQueryExecutor:: +operator()( + RocksDbCommon &common, + const shared_model::interface::GetEngineReceipts &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions) { + throw std::runtime_error(fmt::format("Not implemented")); +} diff --git a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.hpp b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.hpp new file mode 100644 index 
00000000000..f4b26d9ac8e --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.hpp @@ -0,0 +1,205 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_SPECIFIC_QUERY_EXECUTOR_HPP +#define IROHA_ROCKSDB_SPECIFIC_QUERY_EXECUTOR_HPP + +#include "ametsuchi/specific_query_executor.hpp" + +#include +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "common/result.hpp" +#include "interfaces/iroha_internal/query_response_factory.hpp" +#include "interfaces/permissions.hpp" + +namespace rocksdb { + class Transaction; +} + +namespace shared_model::interface { + class GetAccount; + class GetBlock; + class GetSignatories; + class GetAccountTransactions; + class GetAccountAssetTransactions; + class GetTransactions; + class GetAccountAssets; + class GetAccountDetail; + class GetRoles; + class GetRolePermissions; + class GetAssetInfo; + class GetPendingTransactions; + class GetPeers; + class GetEngineReceipts; + class PermissionToString; +} // namespace shared_model::interface + +namespace iroha { + class PendingTransactionStorage; +} // namespace iroha + +namespace iroha::ametsuchi { + class BlockStorage; + + class RocksDbSpecificQueryExecutor : public SpecificQueryExecutor { + public: + using ExecutionResult = expected::Result; + + enum ErrorCodes { + kFetchBlockFailed = 1, + kQueryHeightOverflow = 3, + kAssetNotFound = 4, + kNoTransaction = 4, + kRetrieveTransactionsFailed = 1010, + }; + + RocksDbSpecificQueryExecutor( + std::shared_ptr db_context, + BlockStorage &block_store, + std::shared_ptr pending_txs_storage, + std::shared_ptr + response_factory, + std::shared_ptr + perm_converter); + + std::shared_ptr getTxContext(); + + QueryExecutorResult execute( + const shared_model::interface::Query &qry) override; + + bool hasAccountRolePermission( + shared_model::interface::permissions::Role permission, + const std::string &account_id) const override; + + ExecutionResult 
operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccount &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetBlock &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetSignatories &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccountTransactions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetTransactions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccountAssetTransactions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccountAssets 
&query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetAccountDetail &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetRoles &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetRolePermissions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetAssetInfo &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetPendingTransactions &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetPeers &query, + const shared_model::interface::types::AccountIdType &creator_id, + const 
shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + ExecutionResult operator()( + RocksDbCommon &common, + const shared_model::interface::GetEngineReceipts &query, + const shared_model::interface::types::AccountIdType &creator_id, + const shared_model::interface::types::HashType &query_hash, + shared_model::interface::RolePermissionSet const &creator_permissions); + + private: + mutable std::shared_ptr db_context_; + BlockStorage &block_store_; + std::shared_ptr pending_txs_storage_; + std::shared_ptr + query_response_factory_; + std::shared_ptr + perm_converter_; + + /** + * Get transactions from block using range from range_gen and filtered by + * predicate pred and store them in dest_it + */ + template + iroha::expected::Result getTransactionsFromBlock( + uint64_t block_id, + uint64_t tx_index, + Pred &&pred, + OutputIterator dest_it); + + template + ExecutionResult readTxs( + RocksDbCommon &common, + std::shared_ptr + &query_response_factory, + const Qry &query, + const shared_model::interface::types::HashType &query_hash); + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_ROCKSDB_SPECIFIC_QUERY_EXECUTOR_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_storage_impl.cpp b/irohad/ametsuchi/impl/rocksdb_storage_impl.cpp new file mode 100644 index 00000000000..53b8ab305c5 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_storage_impl.cpp @@ -0,0 +1,231 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_storage_impl.hpp" + +#include + +#include "ametsuchi/impl/block_index_impl.hpp" +#include "ametsuchi/impl/mutable_storage_impl.hpp" +#include "ametsuchi/impl/peer_query_wsv.hpp" +#include "ametsuchi/impl/rocksdb_block_query.hpp" +#include "ametsuchi/impl/rocksdb_command_executor.hpp" +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "ametsuchi/impl/rocksdb_indexer.hpp" +#include "ametsuchi/impl/rocksdb_query_executor.hpp" +#include "ametsuchi/impl/rocksdb_settings_query.hpp" +#include "ametsuchi/impl/rocksdb_specific_query_executor.hpp" +#include "ametsuchi/impl/rocksdb_temporary_wsv_impl.hpp" +#include "ametsuchi/impl/rocksdb_wsv_command.hpp" +#include "ametsuchi/impl/rocksdb_wsv_query.hpp" +#include "ametsuchi/impl/temporary_wsv_impl.hpp" +#include "ametsuchi/ledger_state.hpp" +#include "ametsuchi/tx_executor.hpp" +#include "common/bind.hpp" +#include "common/result.hpp" +#include "logger/logger.hpp" +#include "logger/logger_manager.hpp" + +namespace iroha::ametsuchi { + + RocksDbStorageImpl::RocksDbStorageImpl( + std::shared_ptr db_context, + boost::optional> ledger_state, + std::shared_ptr block_store, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + std::optional> vm_caller, + std::function)> + callback, + logger::LoggerManagerTreePtr log_manager) + : StorageBase(std::move(ledger_state), + std::move(block_store), + std::move(perm_converter), + std::move(pending_txs_storage), + std::move(query_response_factory), + std::move(temporary_block_storage_factory), + std::move(vm_caller), + std::move(log_manager), + "prepared_block_", + std::move(callback), + false), + db_context_(std::move(db_context)) {} + + std::unique_ptr RocksDbStorageImpl::createTemporaryWsv( + std::shared_ptr command_executor) { + auto rdb_command_executor = + 
std::dynamic_pointer_cast(command_executor); + if (rdb_command_executor == nullptr) { + throw std::runtime_error("Bad CommandExecutor cast!"); + } + // if we create temporary storage, then we intend to validate a new + // proposal. this means that any state prepared before that moment is + // not needed and must be removed to prevent locking + command_executor->skipChanges(); + return std::make_unique( + std::move(rdb_command_executor), + logManager()->getChild("TemporaryWorldStateView")); + } + + expected::Result, std::string> + RocksDbStorageImpl::createQueryExecutor( + std::shared_ptr pending_txs_storage, + std::shared_ptr + response_factory) const { + auto log_manager = logManager()->getChild("QueryExecutor"); + return std::make_unique( + response_factory, + std::make_shared( + db_context_, + *blockStore(), + std::move(pending_txs_storage), + response_factory, + permConverter()), + log_manager->getLogger()); + } + + expected::Result RocksDbStorageImpl::insertPeer( + const shared_model::interface::Peer &peer) { + log()->info("Insert peer {}", peer.pubkey()); + RocksDBWsvCommand wsv_command(db_context_); + return wsv_command.insertPeer(peer); + } + + expected::Result, std::string> + RocksDbStorageImpl::createCommandExecutor() { + return std::make_unique( + db_context_, permConverter(), vmCaller()); + } + + expected::Result, std::string> + RocksDbStorageImpl::createMutableStorage( + std::shared_ptr command_executor) { + return createMutableStorage(std::move(command_executor), + *temporaryBlockStorageFactory()); + } + + expected::Result, std::string> + RocksDbStorageImpl::createMutableStorage( + std::shared_ptr command_executor, + BlockStorageFactory &storage_factory) { + // if we create mutable storage, then we intend to mutate wsv + // this means that any state prepared before that moment is not needed + // and must be removed to prevent locking + command_executor->skipChanges(); + + auto ms_log_manager = logManager()->getChild("RocksDbMutableStorageImpl"); + 
auto wsv_command = std::make_unique(db_context_); + auto peer_query = + std::make_unique(std::make_shared( + db_context_, ms_log_manager->getChild("WsvQuery")->getLogger())); + auto block_index = std::make_unique( + std::make_unique(db_context_), + ms_log_manager->getChild("BlockIndexImpl")->getLogger()); + + return std::make_unique( + ledgerState(), + std::move(wsv_command), + std::move(peer_query), + std::move(block_index), + std::move(command_executor), + storage_factory.create().assumeValue(), + std::move(ms_log_manager)); + } + + iroha::expected::Result RocksDbStorageImpl::resetPeers() { + log()->info("Remove everything from peers table. [UNUSED]"); + return {}; + } + + void RocksDbStorageImpl::freeConnections() { + log()->info("Free connections. [UNUSED]"); + } + + expected::Result, std::string> + RocksDbStorageImpl::create( + std::shared_ptr db_context, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + std::shared_ptr persistent_block_storage, + std::optional> vm_caller_ref, + std::function)> + callback, + logger::LoggerManagerTreePtr log_manager) { + boost::optional> ledger_state; + { + RocksDBWsvQuery wsv_query(db_context, + log_manager->getChild("WsvQuery")->getLogger()); + + auto maybe_top_block_info = wsv_query.getTopBlockInfo(); + auto maybe_ledger_peers = wsv_query.getPeers(); + + if (expected::hasValue(maybe_top_block_info) and maybe_ledger_peers) + ledger_state = std::make_shared( + std::move(*maybe_ledger_peers), + maybe_top_block_info.assumeValue().height, + maybe_top_block_info.assumeValue().top_hash); + } + + return expected::makeValue(std::shared_ptr( + new RocksDbStorageImpl(std::move(db_context), + std::move(ledger_state), + std::move(persistent_block_storage), + perm_converter, + std::move(pending_txs_storage), + std::move(query_response_factory), + std::move(temporary_block_storage_factory), + std::move(vm_caller_ref), + 
std::move(callback), + std::move(log_manager)))); + } + + CommitResult RocksDbStorageImpl::commitPrepared( + std::shared_ptr block) { + RocksDbTransaction tx_context(db_context_); + + RocksDBWsvCommand wsv_command(db_context_); + RocksDBWsvQuery wsv_query( + db_context_, this->logManager()->getChild("WsvQuery")->getLogger()); + auto indexer = std::make_unique(db_context_); + + return StorageBase::commitPreparedImpl( + block, tx_context, wsv_command, wsv_query, std::move(indexer)); + } + + std::shared_ptr RocksDbStorageImpl::getWsvQuery() const { + return std::make_shared( + db_context_, logManager()->getChild("WsvQuery")->getLogger()); + } + + std::shared_ptr RocksDbStorageImpl::getBlockQuery() const { + return std::make_shared( + db_context_, + *blockStore(), + logManager()->getChild("RocksDbBlockQuery")->getLogger()); + } + + boost::optional> + RocksDbStorageImpl::createSettingQuery() const { + std::unique_ptr setting_query_ptr = + std::make_unique( + db_context_, + logManager()->getChild("RocksDbSettingQuery")->getLogger()); + return boost::make_optional(std::move(setting_query_ptr)); + } + + void RocksDbStorageImpl::prepareBlock(std::unique_ptr wsv) { + RocksDbTransaction db_context(db_context_); + StorageBase::prepareBlockImpl(std::move(wsv), db_context); + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/rocksdb_storage_impl.hpp b/irohad/ametsuchi/impl/rocksdb_storage_impl.hpp new file mode 100644 index 00000000000..f89fb97c84b --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_storage_impl.hpp @@ -0,0 +1,114 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_STORAGE_IMPL_HPP +#define IROHA_ROCKSDB_STORAGE_IMPL_HPP + +#include "ametsuchi/impl/storage_base.hpp" + +namespace shared_model { + namespace interface { + class QueryResponseFactory; + } // namespace interface +} // namespace shared_model +namespace iroha { + + class PendingTransactionStorage; + + namespace ametsuchi { + + struct RocksDBPort; + class AmetsuchiTest; + class PostgresOptions; + class VmCaller; + class RocksDbCommon; + struct RocksDBContext; + + class RocksDbStorageImpl final : public StorageBase { + public: + static expected::Result, std::string> + create( + std::shared_ptr db_context, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + std::shared_ptr persistent_block_storage, + std::optional> vm_caller_ref, + std::function)> callback, + logger::LoggerManagerTreePtr log_manager); + + expected::Result, std::string> + createCommandExecutor() override; + + std::unique_ptr createTemporaryWsv( + std::shared_ptr command_executor) override; + + boost::optional> createSettingQuery() + const override; + + iroha::expected::Result, std::string> + createQueryExecutor( + std::shared_ptr pending_txs_storage, + std::shared_ptr + response_factory) const override; + + expected::Result insertPeer( + const shared_model::interface::Peer &peer) override; + + iroha::expected::Result, std::string> + createMutableStorage(std::shared_ptr command_executor, + BlockStorageFactory &storage_factory) override; + + expected::Result, std::string> + createMutableStorage( + std::shared_ptr command_executor) override; + + iroha::expected::Result resetPeers() override; + + void freeConnections() override; + + CommitResult commitPrepared( + std::shared_ptr block) override; + + std::shared_ptr getWsvQuery() const override; + + std::shared_ptr getBlockQuery() const override; + + void 
prepareBlock(std::unique_ptr wsv) override; + + ~RocksDbStorageImpl() override = default; + + protected: + RocksDbStorageImpl( + std::shared_ptr db_context, + boost::optional> + ledger_state, + std::shared_ptr block_store, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + std::optional> vm_caller, + std::function)> callback, + logger::LoggerManagerTreePtr log_manager); + + private: + using StoreBlockResult = iroha::expected::Result; + + friend class ::iroha::ametsuchi::AmetsuchiTest; + std::shared_ptr db_context_; + }; + + } // namespace ametsuchi +} // namespace iroha + +#endif // IROHA_ROCKSDB_STORAGE_IMPL_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp new file mode 100644 index 00000000000..26d21ff030e --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp @@ -0,0 +1,66 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_temporary_wsv_impl.hpp" + +#include "ametsuchi/impl/rocksdb_command_executor.hpp" +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "ametsuchi/impl/rocksdb_db_transaction.hpp" +#include "ametsuchi/tx_executor.hpp" +#include "interfaces/commands/command.hpp" +#include "interfaces/permission_to_string.hpp" +#include "interfaces/transaction.hpp" +#include "logger/logger.hpp" +#include "logger/logger_manager.hpp" + +namespace iroha::ametsuchi { + + RocksDbTemporaryWsvImpl::RocksDbTemporaryWsvImpl( + std::shared_ptr command_executor, + logger::LoggerManagerTreePtr log_manager) + : TemporaryWsvImpl(command_executor, log_manager), + tx_context_(command_executor->getSession()) {} + + expected::Result + RocksDbTemporaryWsvImpl::validateSignatures( + const shared_model::interface::Transaction &transaction) { + auto const &[account, domain] = + staticSplitId<2>(transaction.creatorAccountId()); + RocksDbCommon common(tx_context_); + + uint64_t quorum; + if (auto result = forQuorum( + common, account, domain); + expected::hasError(result)) + return expected::makeError( + validation::CommandError{"signatures validation", + result.assumeError().code, + result.assumeError().description, + false}); + else + quorum = *result.assumeValue(); + + for (auto &signatory : transaction.signatures()) + if (auto result = + forSignatory( + common, account, domain, signatory.publicKey()); + expected::hasError(result)) + return expected::makeError( + validation::CommandError{"signatures validation", + 1, + result.assumeError().description, + false}); + + if (boost::size(transaction.signatures()) < quorum) { + auto error_str = "Transaction " + transaction.toString() + + " failed signatures validation"; + return expected::makeError(validation::CommandError{ + "signatures validation", 2, error_str, false}); + } + + return {}; + } + +} // namespace iroha::ametsuchi diff --git 
a/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.hpp b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.hpp new file mode 100644 index 00000000000..a708b12408e --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.hpp @@ -0,0 +1,38 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_TEMPORARY_WSV_IMPL_HPP +#define IROHA_ROCKSDB_TEMPORARY_WSV_IMPL_HPP + +#include "ametsuchi/impl/temporary_wsv_impl.hpp" + +namespace shared_model::interface { + class PermissionToString; +} // namespace shared_model::interface + +namespace iroha::ametsuchi { + + class TransactionExecutor; + class RocksDbCommandExecutor; + struct RocksDBContext; + + class RocksDbTemporaryWsvImpl final : public TemporaryWsvImpl { + public: + RocksDbTemporaryWsvImpl( + std::shared_ptr command_executor, + logger::LoggerManagerTreePtr log_manager); + + ~RocksDbTemporaryWsvImpl() = default; + + protected: + expected::Result validateSignatures( + const shared_model::interface::Transaction &transaction); + + std::shared_ptr tx_context_; + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_TEMPORARY_WSV_IMPL_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_command.cpp b/irohad/ametsuchi/impl/rocksdb_wsv_command.cpp new file mode 100644 index 00000000000..50169da15b7 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_wsv_command.cpp @@ -0,0 +1,453 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_wsv_command.hpp" + +#include + +#include +#include "ametsuchi/impl/executor_common.hpp" +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "ametsuchi/ledger_state.hpp" +#include "backend/protobuf/permissions.hpp" +#include "interfaces/common_objects/account.hpp" +#include "interfaces/common_objects/account_asset.hpp" +#include "interfaces/common_objects/asset.hpp" +#include "interfaces/common_objects/domain.hpp" +#include "interfaces/common_objects/peer.hpp" + +namespace iroha::ametsuchi { + + template + WsvCommandResult execute(std::shared_ptr &context, + Func &&func, + Error &&error) { + RocksDbCommon common(context); + if (auto result = std::forward(func)(common); + expected::hasError(result)) + return expected::makeError( + fmt::format("Command: {}, DB error: {} with description {}", + std::forward(error)(), + result.assumeError().code, + result.assumeError().description)); + + return {}; + } + + RocksDBWsvCommand::RocksDBWsvCommand( + std::shared_ptr db_context) + : db_context_(std::move(db_context)) { + assert(db_context_); + } + + WsvCommandResult RocksDBWsvCommand::insertRole( + const shared_model::interface::types::RoleIdType &role_name) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + RDB_ERROR_CHECK(forRole( + common, role_name)); + + shared_model::interface::RolePermissionSet role_permissions; + common.valueBuffer().assign(role_permissions.toBitstring()); + RDB_ERROR_CHECK(forRole(common, role_name)); + + return {}; + }, + [&]() { return fmt::format("Insert role {}", role_name); }); + } + + WsvCommandResult RocksDBWsvCommand::insertAccountRole( + const shared_model::interface::types::AccountIdType &account_id, + const shared_model::interface::types::RoleIdType &role_name) { + return execute(db_context_, + [&](auto &common) -> expected::Result { + auto const names = staticSplitId<2ull>(account_id); + auto const &account_name = names.at(0); 
+ auto const &domain_id = names.at(1); + + common.valueBuffer() = ""; + RDB_ERROR_CHECK(forAccountRole( + common, account_name, domain_id, role_name)); + + return {}; + }, + [&]() { + return fmt::format( + "Insert account {} role {}", account_id, role_name); + }); + } + + WsvCommandResult RocksDBWsvCommand::deleteAccountRole( + const shared_model::interface::types::AccountIdType &account_id, + const shared_model::interface::types::RoleIdType &role_name) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + auto const names = staticSplitId<2ull>(account_id); + auto const &account_name = names.at(0); + auto const &domain_id = names.at(1); + + RDB_ERROR_CHECK( + forAccountRole( + common, account_name, domain_id, role_name)); + + return {}; + }, + [&]() { + return fmt::format( + "Delete account {} role {}", account_id, role_name); + }); + } + + WsvCommandResult RocksDBWsvCommand::insertRolePermissions( + const shared_model::interface::types::RoleIdType &role_id, + const shared_model::interface::RolePermissionSet &permissions) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + common.valueBuffer().assign(permissions.toBitstring()); + RDB_ERROR_CHECK(forRole(common, role_id)); + + return {}; + }, + [&]() { return fmt::format("Insert role {}", role_id); }); + } + + WsvCommandResult RocksDBWsvCommand::insertAccountGrantablePermission( + const shared_model::interface::types::AccountIdType &permittee_account_id, + const shared_model::interface::types::AccountIdType &account_id, + shared_model::interface::permissions::Grantable permission) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + auto names = staticSplitId<2ull>(account_id); + auto &account_name = names.at(0); + auto &domain_id = names.at(1); + + shared_model::interface::GrantablePermissionSet + granted_account_permissions; + { + RDB_TRY_GET_VALUE( + perm, + forGrantablePermissions( + common, account_name, domain_id, 
permittee_account_id)); + if (perm) + granted_account_permissions = std::move(*perm); + } + + granted_account_permissions.set(permission); + common.valueBuffer().assign( + granted_account_permissions.toBitstring()); + RDB_ERROR_CHECK( + forGrantablePermissions( + common, account_name, domain_id, permittee_account_id)); + + return {}; + }, + [&]() { + return fmt::format("Insert account {} grantable permission {} for {}", + account_id, + permission, + permittee_account_id); + }); + } + + WsvCommandResult RocksDBWsvCommand::deleteAccountGrantablePermission( + const shared_model::interface::types::AccountIdType &permittee_account_id, + const shared_model::interface::types::AccountIdType &account_id, + shared_model::interface::permissions::Grantable permission) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + auto const names = staticSplitId<2ull>(account_id); + auto const &account_name = names.at(0); + auto const &domain_id = names.at(1); + + shared_model::interface::GrantablePermissionSet + granted_account_permissions; + { + RDB_TRY_GET_VALUE( + perm, + forGrantablePermissions( + common, account_name, domain_id, permittee_account_id)); + if (perm) + granted_account_permissions = std::move(*perm); + } + + granted_account_permissions.unset(permission); + common.valueBuffer().assign( + granted_account_permissions.toBitstring()); + RDB_ERROR_CHECK( + forGrantablePermissions( + common, account_name, domain_id, permittee_account_id)); + + return {}; + }, + [&]() { + return fmt::format("Delete account {} grantable permission {} for {}", + account_id, + permission, + permittee_account_id); + }); + } + + WsvCommandResult RocksDBWsvCommand::insertAccount( + const shared_model::interface::Account &account) { + return execute(db_context_, + [&](auto &common) -> expected::Result { + common.encode(account.quorum()); + RDB_ERROR_CHECK(forQuorum( + common, account.accountId(), account.domainId())); + + assert(account.jsonData() == "{}"); + return {}; + 
}, + [&]() { + return fmt::format("Insert account {}#{} details", + account.accountId(), + account.domainId()); + }); + } + + WsvCommandResult RocksDBWsvCommand::insertAsset( + const shared_model::interface::Asset &asset) { + return execute(db_context_, + [&](auto &common) -> expected::Result { + common.encode(asset.precision()); + RDB_ERROR_CHECK(forAsset( + common, asset.assetId(), asset.domainId())); + return {}; + }, + [&]() { + return fmt::format("Insert asset {}#{} with precision {}", + asset.assetId(), + asset.domainId(), + asset.precision()); + }); + } + + WsvCommandResult RocksDBWsvCommand::upsertAccountAsset( + const shared_model::interface::AccountAsset &asset) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + auto const names = staticSplitId<2ull>(asset.accountId()); + auto const &account_name = names.at(0); + auto const &domain_id = names.at(1); + + common.valueBuffer().assign(asset.balance().toStringRepr()); + RDB_ERROR_CHECK(forAccountAsset( + common, account_name, domain_id, asset.assetId())); + return {}; + }, + [&]() { + return fmt::format("Account {} asset {} balance {}", + asset.accountId(), + asset.assetId(), + asset.balance().toStringRepr()); + }); + } + + WsvCommandResult RocksDBWsvCommand::insertSignatory( + shared_model::interface::types::PublicKeyHexStringView signatory) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + return makeError(ErrorCodes::kNotUsed, "Not used"); + }, + [&]() { return fmt::format("Insert signatory {}", signatory); }); + } + + WsvCommandResult RocksDBWsvCommand::insertAccountSignatory( + const shared_model::interface::types::AccountIdType &account_id, + shared_model::interface::types::PublicKeyHexStringView signatory) { + return execute(db_context_, + [&](auto &common) -> expected::Result { + auto const names = staticSplitId<2ull>(account_id); + auto const &account_name = names.at(0); + auto const &domain_id = names.at(1); + + std::string result; + 
std::transform(((std::string_view)signatory).begin(), + ((std::string_view)signatory).end(), + std::back_inserter(result), + [](auto c) { return std::tolower(c); }); + + common.valueBuffer() = ""; + RDB_ERROR_CHECK(forSignatory( + common, account_name, domain_id, result)); + return {}; + }, + [&]() { + return fmt::format("Account {} insert signatory {}", + account_id, + signatory); + }); + } + + WsvCommandResult RocksDBWsvCommand::deleteAccountSignatory( + const shared_model::interface::types::AccountIdType &account_id, + shared_model::interface::types::PublicKeyHexStringView signatory) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + auto const names = staticSplitId<2ull>(account_id); + auto const &account_name = names.at(0); + auto const &domain_id = names.at(1); + + std::string result; + std::transform(((std::string_view)signatory).begin(), + ((std::string_view)signatory).end(), + std::back_inserter(result), + [](auto c) { return std::tolower(c); }); + + RDB_ERROR_CHECK(forSignatory( + common, account_name, domain_id, result)); + return {}; + }, + [&]() { + return fmt::format( + "Account {} delete signatory {}", account_id, signatory); + }); + } + + WsvCommandResult RocksDBWsvCommand::deleteSignatory( + shared_model::interface::types::PublicKeyHexStringView signatory) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + return makeError(ErrorCodes::kNotUsed, "Not used"); + }, + [&]() { return fmt::format("Insert signatory {}", signatory); }); + } + + WsvCommandResult RocksDBWsvCommand::insertPeer( + const shared_model::interface::Peer &peer) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + std::string result; + std::transform(peer.pubkey().begin(), + peer.pubkey().end(), + std::back_inserter(result), + [](auto c) { return std::tolower(c); }); + + common.valueBuffer().assign(peer.address()); + RDB_ERROR_CHECK(forPeerAddress(common, result)); + + if (peer.tlsCertificate()) { + 
common.valueBuffer().assign(peer.tlsCertificate().value()); + RDB_ERROR_CHECK(forPeerTLS(common, result)); + } + return {}; + }, + [&]() { + return fmt::format( + "Insert peer {} with address {}", peer.pubkey(), peer.address()); + }); + } + + WsvCommandResult RocksDBWsvCommand::deletePeer( + const shared_model::interface::Peer &peer) { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + std::string result; + std::transform(peer.pubkey().begin(), + peer.pubkey().end(), + std::back_inserter(result), + [](auto c) { return std::tolower(c); }); + + RDB_ERROR_CHECK( + forPeerAddress(common, + result)); + RDB_ERROR_CHECK(forPeerTLS( + common, result)); + return {}; + }, + [&]() { + return fmt::format( + "Delete peer {} with address {}", peer.pubkey(), peer.address()); + }); + } + + WsvCommandResult RocksDBWsvCommand::insertDomain( + const shared_model::interface::Domain &domain) { + return execute(db_context_, + [&](auto &common) -> expected::Result { + common.valueBuffer().assign(domain.defaultRole()); + RDB_ERROR_CHECK(forDomain( + common, domain.domainId())); + return {}; + }, + [&]() { + return fmt::format("Domain {} with default role {}", + domain.domainId(), + domain.defaultRole()); + }); + } + + WsvCommandResult RocksDBWsvCommand::updateAccount( + const shared_model::interface::Account &account) { + return execute(db_context_, + [&](auto &common) -> expected::Result { + auto const names = + staticSplitId<2ull>(account.accountId()); + auto const &account_name = names.at(0); + auto const &domain_id = names.at(1); + + common.encode(account.quorum()); + RDB_ERROR_CHECK(forQuorum( + common, account_name, domain_id)); + return {}; + }, + [&]() { + return fmt::format("Account {} with quorum {}", + account.accountId(), + account.quorum()); + }); + } + + WsvCommandResult RocksDBWsvCommand::setAccountKV( + const shared_model::interface::types::AccountIdType &account_id, + const shared_model::interface::types::AccountIdType &creator_account_id, + 
const std::string &key, + const std::string &val) { + return execute(db_context_, + [&](auto &common) -> expected::Result { + return makeError(ErrorCodes::kNotUsed, "Not used"); + }, + [&]() { + return fmt::format( + "Set account {} kv with creator {} and key {}", + account_id, + creator_account_id, + key); + }); + } + + WsvCommandResult RocksDBWsvCommand::setTopBlockInfo( + const TopBlockInfo &top_block_info) const { + return execute( + db_context_, + [&](auto &common) -> expected::Result { + common.valueBuffer() = std::to_string(top_block_info.height); + common.valueBuffer() += "#"; + common.valueBuffer() += top_block_info.top_hash.hex(); + + RDB_ERROR_CHECK(forTopBlockInfo(common)); + return {}; + }, + [&]() { + return fmt::format("Top block height {} and hash {}", + top_block_info.height, + top_block_info.top_hash.hex()); + }); + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_command.hpp b/irohad/ametsuchi/impl/rocksdb_wsv_command.hpp new file mode 100644 index 00000000000..3ebdb0409ec --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_wsv_command.hpp @@ -0,0 +1,88 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_WSV_COMMAND_HPP +#define IROHA_ROCKSDB_WSV_COMMAND_HPP + +#include "ametsuchi/wsv_command.hpp" + +#include "interfaces/common_objects/string_view_types.hpp" + +namespace iroha { + namespace ametsuchi { + struct RocksDBPort; + struct RocksDBContext; + + class RocksDBWsvCommand : public WsvCommand { + public: + enum ErrorCodes { kNotUsed = 1000 }; + + explicit RocksDBWsvCommand(std::shared_ptr db_context); + WsvCommandResult insertRole( + const shared_model::interface::types::RoleIdType &role_name) override; + WsvCommandResult insertAccountRole( + const shared_model::interface::types::AccountIdType &account_id, + const shared_model::interface::types::RoleIdType &role_name) override; + WsvCommandResult deleteAccountRole( + const shared_model::interface::types::AccountIdType &account_id, + const shared_model::interface::types::RoleIdType &role_name) override; + WsvCommandResult insertRolePermissions( + const shared_model::interface::types::RoleIdType &role_id, + const shared_model::interface::RolePermissionSet &permissions) + override; + WsvCommandResult insertAccount( + const shared_model::interface::Account &account) override; + WsvCommandResult updateAccount( + const shared_model::interface::Account &account) override; + WsvCommandResult setAccountKV( + const shared_model::interface::types::AccountIdType &account_id, + const shared_model::interface::types::AccountIdType + &creator_account_id, + const std::string &key, + const std::string &val) override; + WsvCommandResult insertAsset( + const shared_model::interface::Asset &asset) override; + WsvCommandResult upsertAccountAsset( + const shared_model::interface::AccountAsset &asset) override; + WsvCommandResult insertSignatory( + shared_model::interface::types::PublicKeyHexStringView signatory) + override; + WsvCommandResult insertAccountSignatory( + const shared_model::interface::types::AccountIdType &account_id, + 
shared_model::interface::types::PublicKeyHexStringView signatory) + override; + WsvCommandResult deleteAccountSignatory( + const shared_model::interface::types::AccountIdType &account_id, + shared_model::interface::types::PublicKeyHexStringView signatory) + override; + WsvCommandResult deleteSignatory( + shared_model::interface::types::PublicKeyHexStringView signatory) + override; + WsvCommandResult insertPeer( + const shared_model::interface::Peer &peer) override; + WsvCommandResult deletePeer( + const shared_model::interface::Peer &peer) override; + WsvCommandResult insertDomain( + const shared_model::interface::Domain &domain) override; + WsvCommandResult insertAccountGrantablePermission( + const shared_model::interface::types::AccountIdType + &permittee_account_id, + const shared_model::interface::types::AccountIdType &account_id, + shared_model::interface::permissions::Grantable permission) override; + WsvCommandResult deleteAccountGrantablePermission( + const shared_model::interface::types::AccountIdType + &permittee_account_id, + const shared_model::interface::types::AccountIdType &account_id, + shared_model::interface::permissions::Grantable permission) override; + WsvCommandResult setTopBlockInfo( + const TopBlockInfo &top_block_info) const override; + + private: + mutable std::shared_ptr db_context_; + }; + } // namespace ametsuchi +} // namespace iroha + +#endif // IROHA_ROCKSDB_WSV_COMMAND_HPP diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp new file mode 100644 index 00000000000..ea048d575b6 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp @@ -0,0 +1,214 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/rocksdb_wsv_query.hpp" + +#include "ametsuchi/impl/executor_common.hpp" +#include "ametsuchi/ledger_state.hpp" +#include "backend/plain/peer.hpp" +#include "common/common.hpp" +#include "common/result.hpp" +#include "logger/logger.hpp" + +namespace iroha::ametsuchi { + + using shared_model::interface::types::AccountIdType; + using shared_model::interface::types::AddressType; + using shared_model::interface::types::TLSCertificateType; + + template + boost::optional execute(std::shared_ptr &context, + logger::LoggerPtr &log, + Func &&func, + Error &&error) { + assert(log); + + RocksDbCommon common(context); + if (auto result = std::forward(func)(common); + expected::hasError(result)) { + log->error("Command: {}, DB error: {} with description {}", + std::forward(error)(), + result.assumeError().code, + result.assumeError().description); + return boost::none; + } else + return std::move(result.assumeValue()); + } + + RocksDBWsvQuery::RocksDBWsvQuery(std::shared_ptr db_context, + logger::LoggerPtr log) + : db_context_(std::move(db_context)), log_(std::move(log)) { + assert(db_context_); + } + + boost::optional> RocksDBWsvQuery::getSignatories( + const AccountIdType &account_id) { + using RetType = std::vector; + return execute( + db_context_, + log_, + [&](auto &common) -> expected::Result { + auto names = staticSplitId<2ull>(account_id); + auto &account_name = names.at(0); + auto &domain_id = names.at(1); + + RetType signatories; + auto const status = enumerateKeys( + common, + [&](auto const &signatory) { + signatories.emplace_back(signatory.ToStringView()); + return true; + }, + fmtstrings::kPathSignatories, + domain_id, + account_name); + RDB_ERROR_CHECK(canExist(status, [&]() { + return fmt::format("Enumerate signatories for account {}", + account_id); + })); + return signatories; + }, + [&]() { + return fmt::format("Get signatories for account {}", account_id); + }); + } + + boost::optional>> 
+ RocksDBWsvQuery::getPeers() { + using RetType = std::vector>; + return execute( + db_context_, + log_, + [&](auto &common) -> expected::Result { + RetType peers; + auto status = enumerateKeysAndValues( + common, + [&](auto pubkey, auto address) { + if (!pubkey.empty()) + peers.emplace_back( + std::make_shared( + address.ToStringView(), + std::string{pubkey.ToStringView()}, + std::nullopt)); + else + assert(!"Pubkey can not be empty!"); + + return true; + }, + fmtstrings::kPathPeers); + RDB_ERROR_CHECK(canExist( + status, [&]() { return fmt::format("Enumerate peers"); })); + + for (auto &peer : peers) { + RDB_TRY_GET_VALUE( + opt_tls, + forPeerTLS( + common, peer->pubkey())); + + if (opt_tls) + utils::reinterpret_pointer_cast(peer) + ->setTlsCertificate( + shared_model::interface::types::TLSCertificateType{ + *opt_tls}); + } + + return peers; + }, + [&]() { return fmt::format("Get peers"); }); + } + + boost::optional> + RocksDBWsvQuery::getPeerByPublicKey( + shared_model::interface::types::PublicKeyHexStringView public_key) { + using RetType = std::shared_ptr; + return execute( + db_context_, + log_, + [&](auto &common) -> expected::Result { + auto pubkey = (std::string_view)public_key; + + std::string result; + std::transform(pubkey.begin(), + pubkey.end(), + std::back_inserter(result), + [](auto c) { return std::tolower(c); }); + + RDB_TRY_GET_VALUE( + opt_addr, + forPeerAddress(common, + result)); + + RDB_TRY_GET_VALUE(opt_tls, + forPeerTLS( + common, result)); + + return std::make_shared( + std::move(*opt_addr), std::string(pubkey), opt_tls); + }, + [&]() { + return fmt::format("Get peer by pubkey {}", + (std::string_view)public_key); + }); + } + + iroha::expected::Result + RocksDBWsvQuery::getTopBlockInfo() const { + RocksDbCommon common(db_context_); + if (auto result = + forTopBlockInfo(common); + expected::hasError(result)) { + auto err_msg = fmt::format( + "Command: get top block info, DB error: {} with description {}", + result.assumeError().code, + 
result.assumeError().description); + log_->error(err_msg); + return expected::makeError(std::move(err_msg)); + } else { + auto const data = staticSplitId<2ull>(*result.assumeValue()); + auto const &height_str = data.at(0); + auto const &hash_str = data.at(1); + + assert(!height_str.empty()); + assert(!hash_str.empty()); + + uint64_t number; + std::from_chars( + height_str.data(), height_str.data() + height_str.size(), number); + return iroha::TopBlockInfo( + number, + shared_model::crypto::Hash(shared_model::crypto::Blob::fromHexString( + std::string{hash_str}))); + } + } + + iroha::expected::Result RocksDBWsvQuery::countPeers() { + RocksDbCommon common(db_context_); + RDB_TRY_GET_VALUE_OR_STR_ERR( + opt_count, + forPeersCount(common)); + + return *opt_count; + } + + iroha::expected::Result RocksDBWsvQuery::countDomains() { + RocksDbCommon common(db_context_); + RDB_TRY_GET_VALUE_OR_STR_ERR( + opt_count, + forDomainsTotalCount(common)); + + return opt_count ? *opt_count : 0ull; + } + + iroha::expected::Result + RocksDBWsvQuery::countTransactions() { + RocksDbCommon common(db_context_); + RDB_TRY_GET_VALUE_OR_STR_ERR( + opt_count, + forTxsTotalCount(common)); + + return opt_count ? *opt_count : 0ull; + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_query.hpp b/irohad/ametsuchi/impl/rocksdb_wsv_query.hpp new file mode 100644 index 00000000000..0400fc8ade0 --- /dev/null +++ b/irohad/ametsuchi/impl/rocksdb_wsv_query.hpp @@ -0,0 +1,47 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROCKSDB_WSV_QUERY_HPP +#define IROHA_ROCKSDB_WSV_QUERY_HPP + +#include "ametsuchi/wsv_query.hpp" + +#include "ametsuchi/impl/rocksdb_common.hpp" +#include "logger/logger_fwd.hpp" + +namespace iroha { + namespace ametsuchi { + class RocksDBWsvQuery : public WsvQuery { + public: + RocksDBWsvQuery(std::shared_ptr db_context, + logger::LoggerPtr log); + + boost::optional> getSignatories( + const shared_model::interface::types::AccountIdType &account_id) + override; + + boost::optional< + std::vector>> + getPeers() override; + + boost::optional> + getPeerByPublicKey(shared_model::interface::types::PublicKeyHexStringView + public_key) override; + + iroha::expected::Result + getTopBlockInfo() const override; + + iroha::expected::Result countPeers() override; + iroha::expected::Result countDomains() override; + iroha::expected::Result countTransactions() override; + + private: + std::shared_ptr db_context_; + logger::LoggerPtr log_; + }; + } // namespace ametsuchi +} // namespace iroha + +#endif // IROHA_ROCKSDB_WSV_QUERY_HPP diff --git a/irohad/ametsuchi/impl/storage_base.cpp b/irohad/ametsuchi/impl/storage_base.cpp new file mode 100644 index 00000000000..2e0171d4a81 --- /dev/null +++ b/irohad/ametsuchi/impl/storage_base.cpp @@ -0,0 +1,200 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ametsuchi/impl/storage_base.hpp" + +#include + +#include "ametsuchi/impl/block_index_impl.hpp" +#include "ametsuchi/impl/peer_query_wsv.hpp" +#include "ametsuchi/impl/postgres_indexer.hpp" +#include "ametsuchi/impl/postgres_wsv_query.hpp" +#include "ametsuchi/impl/temporary_wsv_impl.hpp" +#include "ametsuchi/ledger_state.hpp" +#include "ametsuchi/tx_executor.hpp" +#include "common/result.hpp" +#include "common/result_try.hpp" +#include "logger/logger.hpp" +#include "logger/logger_manager.hpp" +#include "main/subscription.hpp" + +namespace iroha::ametsuchi { + + boost::optional> StorageBase::createPeerQuery() + const { + auto wsv = getWsvQuery(); + if (not wsv) { + return boost::none; + } + return boost::make_optional>( + std::make_shared(wsv)); + } + + expected::Result StorageBase::dropBlockStorage() { + log_->info("drop block storage"); + block_store_->clear(); + return iroha::expected::Value{}; + } + + boost::optional> + StorageBase::getLedgerState() const { + return ledger_state_; + } + + StorageBase::StorageBase( + boost::optional> ledger_state, + std::shared_ptr block_store, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + std::optional> vm_caller_ref, + logger::LoggerManagerTreePtr log_manager, + std::string const &prepared_block_name, + std::function)> + callback, + bool prepared_blocks_enabled) + : block_store_(std::move(block_store)), + callback_(std::move(callback)), + perm_converter_(std::move(perm_converter)), + pending_txs_storage_(std::move(pending_txs_storage)), + query_response_factory_(std::move(query_response_factory)), + temporary_block_storage_factory_( + std::move(temporary_block_storage_factory)), + vm_caller_ref_(std::move(vm_caller_ref)), + log_manager_(std::move(log_manager)), + log_(log_manager_->getLogger()), + ledger_state_(std::move(ledger_state)), + 
prepared_blocks_enabled_(prepared_blocks_enabled), + block_is_prepared_(false), + prepared_block_name_(prepared_block_name) {} + + StorageBase::StoreBlockResult StorageBase::storeBlock( + std::shared_ptr block) { + if (blockStore()->insert(block)) { + callback_(block); + return {}; + } + return expected::makeError("Block insertion to storage failed"); + } + + bool StorageBase::preparedCommitEnabled() const { + return prepared_blocks_enabled_ and block_is_prepared_; + } + + StorageBase::~StorageBase() {} + + expected::Result StorageBase::insertBlock( + std::shared_ptr block) { + log_->info("create mutable storage"); + IROHA_EXPECTED_TRY_GET_VALUE(command_executor, createCommandExecutor()); + IROHA_EXPECTED_TRY_GET_VALUE( + mutable_storage, createMutableStorage(std::move(command_executor))); + const bool is_inserted = mutable_storage->apply(block); + commit(std::move(mutable_storage)); + if (is_inserted) { + return {}; + } + return "Stateful validation failed."; + } + + CommitResult StorageBase::commit( + std::unique_ptr mutable_storage) { + auto old_height = blockStore()->size(); + IROHA_EXPECTED_TRY_GET_VALUE( + result, std::move(*mutable_storage).commit(*blockStore())); + ledgerState(result.ledger_state); + auto new_height = blockStore()->size(); + for (auto height = old_height + 1; height <= new_height; ++height) { + auto maybe_block = blockStore()->fetch(height); + if (not maybe_block) { + return fmt::format("Failed to fetch block {}", height); + } + callback_(*std::move(maybe_block)); + } + return expected::makeValue(std::move(result.ledger_state)); + } + + void StorageBase::prepareBlockImpl(std::unique_ptr wsv, + DatabaseTransaction &db_context) { + if (not prepared_blocks_enabled_) { + log()->warn("prepared blocks are not enabled"); + return; + } + if (block_is_prepared_) { + log()->warn( + "Refusing to add new prepared state, because there already is one. 
" + "Multiple prepared states are not yet supported."); + } else { + try { + db_context.prepare(prepared_block_name_); + block_is_prepared_ = true; + } catch (const std::exception &e) { + log()->warn("failed to prepare state: {}", e.what()); + } + + log()->info("state prepared successfully"); + } + } + + CommitResult StorageBase::commitPreparedImpl( + std::shared_ptr block, + DatabaseTransaction &db_context, + WsvCommand &wsv_command, + WsvQuery &wsv_query, + std::unique_ptr indexer) { + if (not prepared_blocks_enabled_) { + return expected::makeError( + std::string{"prepared blocks are not enabled"}); + } + + if (not block_is_prepared_) { + return expected::makeError("there are no prepared blocks"); + } + + log()->info("applying prepared block"); + + try { + if (not blockStore()->insert(block)) { + return fmt::format("Failed to insert block {}", *block); + } + + db_context.commitPrepared(prepared_block_name_); + BlockIndexImpl block_index( + std::move(indexer), + logManager()->getChild("BlockIndex")->getLogger()); + block_index.index(*block); + block_is_prepared_ = false; + + if (auto e = expected::resultToOptionalError(wsv_command.setTopBlockInfo( + TopBlockInfo{block->height(), block->hash()}))) { + throw std::runtime_error(e.value()); + } + + callback_(block); + + decltype(std::declval().getPeers()) opt_ledger_peers; + { + if (not(opt_ledger_peers = wsv_query.getPeers())) { + return expected::makeError( + std::string{"Failed to get ledger peers! 
Will retry."}); + } + } + assert(opt_ledger_peers); + + ledgerState(std::make_shared( + std::move(*opt_ledger_peers), block->height(), block->hash())); + return expected::makeValue(ledgerState().value()); + } catch (const std::exception &e) { + std::string msg(fmt::format("failed to apply prepared block {}: {}", + block->hash().hex(), + e.what())); + return expected::makeError(msg); + } + } + +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/storage_base.hpp b/irohad/ametsuchi/impl/storage_base.hpp new file mode 100644 index 00000000000..a810c6838d9 --- /dev/null +++ b/irohad/ametsuchi/impl/storage_base.hpp @@ -0,0 +1,179 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_STORAGE_BASE_HPP +#define IROHA_STORAGE_BASE_HPP + +#include "ametsuchi/storage.hpp" + +#include +#include + +#include +#include "ametsuchi/block_storage_factory.hpp" +#include "ametsuchi/impl/pool_wrapper.hpp" +#include "ametsuchi/indexer.hpp" +#include "ametsuchi/key_value_storage.hpp" +#include "ametsuchi/ledger_state.hpp" +#include "ametsuchi/mutable_storage.hpp" +#include "ametsuchi/reconnection_strategy.hpp" +#include "ametsuchi/wsv_command.hpp" +#include "interfaces/permission_to_string.hpp" +#include "logger/logger_fwd.hpp" +#include "logger/logger_manager_fwd.hpp" + +namespace shared_model::interface { + class QueryResponseFactory; +} // namespace shared_model::interface +namespace iroha { + class PendingTransactionStorage; +} + +namespace iroha::ametsuchi { + + class AmetsuchiTest; + class PostgresOptions; + class VmCaller; + + class StorageBase : public Storage { + std::shared_ptr block_store_; + std::function)> + callback_; + std::shared_ptr + perm_converter_; + std::shared_ptr pending_txs_storage_; + std::shared_ptr + query_response_factory_; + std::unique_ptr temporary_block_storage_factory_; + std::optional> vm_caller_ref_; + logger::LoggerManagerTreePtr log_manager_; + logger::LoggerPtr 
log_; + boost::optional> ledger_state_; + bool prepared_blocks_enabled_; + std::atomic block_is_prepared_; + std::string prepared_block_name_; + + protected: + CommitResult commitPreparedImpl( + std::shared_ptr block, + DatabaseTransaction &db_context, + WsvCommand &wsv_command, + WsvQuery &wsv_query, + std::unique_ptr indexer); + + public: + using StoreBlockResult = iroha::expected::Result; + + StorageBase(StorageBase &&) = delete; + StorageBase(StorageBase const &) = delete; + + StorageBase &operator=(StorageBase &&) = delete; + StorageBase &operator=(StorageBase const &) = delete; + + boost::optional> createPeerQuery() + const override; + + bool preparedCommitEnabled() const override; + + boost::optional> createBlockQuery() + const override { + auto block_query = getBlockQuery(); + if (not block_query) { + return boost::none; + } + return boost::make_optional(block_query); + } + + logger::LoggerManagerTreePtr logManager() const { + return log_manager_; + } + + auto &blockIsPrepared() { + return block_is_prepared_; + } + + std::shared_ptr blockStore() const { + return block_store_; + } + + std::shared_ptr permConverter() + const { + return perm_converter_; + } + + logger::LoggerPtr log() const { + return log_; + } + + std::shared_ptr pendingTxStorage() const { + return pending_txs_storage_; + } + + auto &temporaryBlockStorageFactory() { + return temporary_block_storage_factory_; + } + + std::shared_ptr + queryResponseFactory() const { + return query_response_factory_; + } + + std::optional> vmCaller() const { + return vm_caller_ref_; + } + + boost::optional> ledgerState() + const { + return ledger_state_; + } + + void ledgerState( + boost::optional> const + &value) { + ledger_state_ = value; + } + + expected::Result insertBlock( + std::shared_ptr block) override; + + expected::Result dropBlockStorage() override; + + boost::optional> getLedgerState() + const override; + + CommitResult commit( + std::unique_ptr mutable_storage) override; + + void 
prepareBlockImpl(std::unique_ptr wsv, + DatabaseTransaction &db_context); + + /** + * add block to block storage + */ + StoreBlockResult storeBlock( + std::shared_ptr block); + + StorageBase( + boost::optional> ledger_state, + std::shared_ptr block_store, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + std::optional> vm_caller_ref, + logger::LoggerManagerTreePtr log_manager, + std::string const &prepared_block_name, + std::function)> callback, + bool prepared_blocks_enabled); + + ~StorageBase(); + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_STORAGE_BASE_HPP diff --git a/irohad/ametsuchi/impl/storage_impl.cpp b/irohad/ametsuchi/impl/storage_impl.cpp index b566b77c019..18d7dd61809 100644 --- a/irohad/ametsuchi/impl/storage_impl.cpp +++ b/irohad/ametsuchi/impl/storage_impl.cpp @@ -5,17 +5,12 @@ #include "ametsuchi/impl/storage_impl.hpp" -#include - #include #include -#include -#include -#include -#include + +#include "ametsuchi/impl/block_index_impl.hpp" #include "ametsuchi/impl/mutable_storage_impl.hpp" #include "ametsuchi/impl/peer_query_wsv.hpp" -#include "ametsuchi/impl/postgres_block_index.hpp" #include "ametsuchi/impl/postgres_block_query.hpp" #include "ametsuchi/impl/postgres_block_storage_factory.hpp" #include "ametsuchi/impl/postgres_command_executor.hpp" @@ -24,454 +19,321 @@ #include "ametsuchi/impl/postgres_query_executor.hpp" #include "ametsuchi/impl/postgres_setting_query.hpp" #include "ametsuchi/impl/postgres_specific_query_executor.hpp" +#include "ametsuchi/impl/postgres_temporary_wsv_impl.hpp" #include "ametsuchi/impl/postgres_wsv_command.hpp" #include "ametsuchi/impl/postgres_wsv_query.hpp" -#include "ametsuchi/impl/temporary_wsv_impl.hpp" #include "ametsuchi/ledger_state.hpp" #include "ametsuchi/tx_executor.hpp" #include "backend/protobuf/permissions.hpp" -#include "common/bind.hpp" #include "common/byteutils.hpp" 
#include "common/result.hpp" +#include "common/result_try.hpp" #include "logger/logger.hpp" #include "logger/logger_manager.hpp" #include "main/impl/pg_connection_init.hpp" -namespace iroha { - namespace ametsuchi { - - const char *kCommandExecutorError = "Cannot create CommandExecutorFactory"; - const char *kPsqlBroken = "Connection to PostgreSQL broken: %s"; - const char *kTmpWsv = "TemporaryWsv"; - - StorageImpl::StorageImpl( - boost::optional> ledger_state, - const ametsuchi::PostgresOptions &postgres_options, - std::shared_ptr block_store, - std::shared_ptr pool_wrapper, - std::shared_ptr - perm_converter, - std::shared_ptr pending_txs_storage, - std::shared_ptr - query_response_factory, - std::unique_ptr temporary_block_storage_factory, - size_t pool_size, - std::optional> vm_caller_ref, - logger::LoggerManagerTreePtr log_manager) - : block_store_(std::move(block_store)), - pool_wrapper_(std::move(pool_wrapper)), - connection_(pool_wrapper_->connection_pool_), - notifier_(notifier_lifetime_), - perm_converter_(std::move(perm_converter)), - pending_txs_storage_(std::move(pending_txs_storage)), - query_response_factory_(std::move(query_response_factory)), - temporary_block_storage_factory_( - std::move(temporary_block_storage_factory)), - vm_caller_ref_(std::move(vm_caller_ref)), - log_manager_(std::move(log_manager)), - log_(log_manager_->getLogger()), - pool_size_(pool_size), - prepared_blocks_enabled_( - pool_wrapper_->enable_prepared_transactions_), - block_is_prepared_(false), - prepared_block_name_(postgres_options.preparedBlockName()), - ledger_state_(std::move(ledger_state)) {} - - std::unique_ptr StorageImpl::createTemporaryWsv( - std::shared_ptr command_executor) { - auto postgres_command_executor = - std::dynamic_pointer_cast(command_executor); - if (postgres_command_executor == nullptr) { - throw std::runtime_error("Bad PostgresCommandExecutor cast!"); - } - // if we create temporary storage, then we intend to validate a new - // proposal. 
this means that any state prepared before that moment is - // not needed and must be removed to prevent locking - tryRollback(postgres_command_executor->getSession()); - return std::make_unique( - std::move(postgres_command_executor), - log_manager_->getChild("TemporaryWorldStateView")); - } - - expected::Result, std::string> - StorageImpl::createMutableStorage( - std::shared_ptr command_executor) { - return createMutableStorage(std::move(command_executor), - *temporary_block_storage_factory_); - } - - boost::optional> StorageImpl::createPeerQuery() - const { - auto wsv = getWsvQuery(); - if (not wsv) { - return boost::none; - } - return boost::make_optional>( - std::make_shared(wsv)); - } - - boost::optional> StorageImpl::createBlockQuery() - const { - auto block_query = getBlockQuery(); - if (not block_query) { - return boost::none; - } - return boost::make_optional(block_query); - } - - iroha::expected::Result, std::string> - StorageImpl::createQueryExecutor( - std::shared_ptr pending_txs_storage, - std::shared_ptr - response_factory) const { - std::shared_lock lock(drop_mutex_); - if (not connection_) { - return "createQueryExecutor: connection to database is not initialised"; - } - auto sql = std::make_unique(*connection_); - auto log_manager = log_manager_->getChild("QueryExecutor"); - return std::make_unique( - std::move(sql), - response_factory, - std::make_shared( - *sql, - *block_store_, - std::move(pending_txs_storage), - response_factory, - perm_converter_, - log_manager->getChild("SpecificQueryExecutor")->getLogger()), - log_manager->getLogger()); +namespace iroha::ametsuchi { + + StorageImpl::StorageImpl( + boost::optional> ledger_state, + const ametsuchi::PostgresOptions &postgres_options, + std::shared_ptr block_store, + std::shared_ptr pool_wrapper, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + size_t pool_size, + 
std::optional> vm_caller_ref, + std::function)> + callback, + logger::LoggerManagerTreePtr log_manager) + : StorageBase(std::move(ledger_state), + std::move(block_store), + std::move(perm_converter), + std::move(pending_txs_storage), + std::move(query_response_factory), + std::move(temporary_block_storage_factory), + std::move(vm_caller_ref), + std::move(log_manager), + postgres_options.preparedBlockName(), + std::move(callback), + pool_wrapper->enable_prepared_transactions_), + pool_wrapper_(pool_wrapper), + connection_(pool_wrapper->connection_pool_), + pool_size_(pool_size), + prepared_block_name_(postgres_options.preparedBlockName()) {} + + std::unique_ptr StorageImpl::createTemporaryWsv( + std::shared_ptr command_executor) { + auto postgres_command_executor = + std::dynamic_pointer_cast(command_executor); + if (postgres_command_executor == nullptr) { + throw std::runtime_error("Bad PostgresCommandExecutor cast!"); } - - expected::Result StorageImpl::insertBlock( - std::shared_ptr block) { - log_->info("create mutable storage"); - return createCommandExecutor() | [&](auto &&command_executor) { - return createMutableStorage(std::move(command_executor)) | - [&](auto &&mutable_storage) - -> expected::Result { - const bool is_inserted = mutable_storage->apply(block); - commit(std::move(mutable_storage)); - if (is_inserted) { - return {}; - } - return "Stateful validation failed."; - }; - }; + // if we create temporary storage, then we intend to validate a new + // proposal. 
this means that any state prepared before that moment is + // not needed and must be removed to prevent locking + tryRollback(postgres_command_executor->getSession()); + return std::make_unique( + std::move(postgres_command_executor), + logManager()->getChild("TemporaryWorldStateView")); + } + + iroha::expected::Result, std::string> + StorageImpl::createQueryExecutor( + std::shared_ptr pending_txs_storage, + std::shared_ptr + response_factory) const { + std::shared_lock lock(drop_mutex_); + if (not connection_) { + return "createQueryExecutor: connection to database is not initialised"; } - - expected::Result StorageImpl::insertPeer( - const shared_model::interface::Peer &peer) { - log_->info("Insert peer {}", peer.pubkey()); - soci::session sql(*connection_); - PostgresWsvCommand wsv_command(sql); - return wsv_command.insertPeer(peer); + auto sql = std::make_unique(*connection_); + auto log_manager = logManager()->getChild("QueryExecutor"); + return std::make_unique( + std::move(sql), + response_factory, + std::make_shared( + *sql, + *blockStore(), + std::move(pending_txs_storage), + response_factory, + permConverter(), + log_manager->getChild("SpecificQueryExecutor")->getLogger()), + log_manager->getLogger()); + } + + expected::Result StorageImpl::insertPeer( + const shared_model::interface::Peer &peer) { + log()->info("Insert peer {}", peer.pubkey()); + soci::session sql(*connection_); + PostgresWsvCommand wsv_command(sql); + return wsv_command.insertPeer(peer); + } + + expected::Result, std::string> + StorageImpl::createCommandExecutor() { + std::shared_lock lock(drop_mutex_); + if (connection_ == nullptr) { + return expected::makeError("Connection was closed"); } - - expected::Result, std::string> - StorageImpl::createCommandExecutor() { - std::shared_lock lock(drop_mutex_); - if (connection_ == nullptr) { - return expected::makeError("Connection was closed"); - } - auto sql = std::make_unique(*connection_); - return std::make_unique( - std::move(sql), - 
perm_converter_, - std::make_shared( - *sql, - *block_store_, - pending_txs_storage_, - query_response_factory_, - perm_converter_, - log_manager_->getChild("SpecificQueryExecutor")->getLogger()), - vm_caller_ref_); + auto sql = std::make_unique(*connection_); + return std::make_unique( + std::move(sql), + permConverter(), + std::make_shared( + *sql, + *blockStore(), + pendingTxStorage(), + queryResponseFactory(), + permConverter(), + logManager()->getChild("SpecificQueryExecutor")->getLogger()), + vmCaller()); + } + + expected::Result, std::string> + StorageImpl::createMutableStorage( + std::shared_ptr command_executor) { + return createMutableStorage(std::move(command_executor), + *temporaryBlockStorageFactory()); + } + + expected::Result, std::string> + StorageImpl::createMutableStorage( + std::shared_ptr command_executor, + BlockStorageFactory &storage_factory) { + auto postgres_command_executor = + std::dynamic_pointer_cast(command_executor); + if (postgres_command_executor == nullptr) { + throw std::runtime_error("Bad PostgresCommandExecutor cast!"); } - - expected::Result, std::string> - StorageImpl::createMutableStorage( - std::shared_ptr command_executor, - BlockStorageFactory &storage_factory) { - auto postgres_command_executor = - std::dynamic_pointer_cast(command_executor); - if (postgres_command_executor == nullptr) { - throw std::runtime_error("Bad PostgresCommandExecutor cast!"); - } - // if we create mutable storage, then we intend to mutate wsv - // this means that any state prepared before that moment is not needed - // and must be removed to prevent locking - tryRollback(postgres_command_executor->getSession()); - return std::make_unique( - ledger_state_, - std::move(postgres_command_executor), - storage_factory.create().assumeValue(), - log_manager_->getChild("MutableStorageImpl")); + // if we create mutable storage, then we intend to mutate wsv + // this means that any state prepared before that moment is not needed + // and must be removed to 
prevent locking + tryRollback(postgres_command_executor->getSession()); + + auto ms_log_manager = logManager()->getChild("MutableStorageImpl"); + + auto wsv_command = std::make_unique( + postgres_command_executor->getSession()); + + auto peer_query = + std::make_unique(std::make_shared( + postgres_command_executor->getSession(), + ms_log_manager->getChild("WsvQuery")->getLogger())); + + auto block_index = std::make_unique( + std::make_unique( + postgres_command_executor->getSession()), + ms_log_manager->getChild("BlockIndexImpl")->getLogger()); + + return std::make_unique( + ledgerState(), + std::move(wsv_command), + std::move(peer_query), + std::move(block_index), + std::move(postgres_command_executor), + storage_factory.create().assumeValue(), + std::move(ms_log_manager)); + } + + iroha::expected::Result StorageImpl::resetPeers() { + log()->info("Remove everything from peers table"); + soci::session sql(*connection_); + return PgConnectionInit::resetPeers(sql); + } + + void StorageImpl::freeConnections() { + std::unique_lock lock(drop_mutex_); + if (connection_ == nullptr) { + log()->warn("Tried to free connections without active connection"); + return; } - - void StorageImpl::resetPeers() { - log_->info("Remove everything from peers table"); + // rollback possible prepared transaction + { soci::session sql(*connection_); - expected::resultToOptionalError(PgConnectionInit::resetPeers(sql)) | - [this](const auto &e) { this->log_->error("{}", e); }; - } - - expected::Result StorageImpl::dropBlockStorage() { - log_->info("drop block storage"); - block_store_->clear(); - return iroha::expected::Value{}; - } - - boost::optional> - StorageImpl::getLedgerState() const { - return ledger_state_; - } - - void StorageImpl::freeConnections() { - std::unique_lock lock(drop_mutex_); - if (connection_ == nullptr) { - log_->warn("Tried to free connections without active connection"); - return; - } - // rollback possible prepared transaction - { - soci::session sql(*connection_); 
- tryRollback(sql); - } - std::vector> sessions; - for (size_t i = 0; i < pool_size_; i++) { - sessions.push_back(std::make_shared(*connection_)); - sessions.at(i)->close(); - log_->debug("Closed connection {}", i); - } - sessions.clear(); - connection_.reset(); + tryRollback(sql); } - - expected::Result, std::string> - StorageImpl::create( - const ametsuchi::PostgresOptions &postgres_options, - std::shared_ptr pool_wrapper, - std::shared_ptr - perm_converter, - std::shared_ptr pending_txs_storage, - std::shared_ptr - query_response_factory, - std::unique_ptr temporary_block_storage_factory, - std::shared_ptr persistent_block_storage, - std::optional> vm_caller_ref, - logger::LoggerManagerTreePtr log_manager, - size_t pool_size) { - boost::optional> ledger_state; - { - soci::session sql{*pool_wrapper->connection_pool_}; - PostgresWsvQuery wsv_query( - sql, log_manager->getChild("WsvQuery")->getLogger()); - - ledger_state = - expected::resultToOptionalValue(wsv_query.getTopBlockInfo()) | - [&](auto &&top_block_info) { - return wsv_query.getPeers() | - [&top_block_info](auto &&ledger_peers) { - return boost::make_optional( - std::make_shared( - std::move(ledger_peers), - top_block_info.height, - top_block_info.top_hash)); - }; - }; - } - - return expected::makeValue(std::shared_ptr( - new StorageImpl(std::move(ledger_state), - std::move(postgres_options), - std::move(persistent_block_storage), - std::move(pool_wrapper), - perm_converter, - std::move(pending_txs_storage), - std::move(query_response_factory), - std::move(temporary_block_storage_factory), - pool_size, - std::move(vm_caller_ref), - std::move(log_manager)))); + std::vector> sessions; + for (size_t i = 0; i < pool_size_; i++) { + sessions.push_back(std::make_shared(*connection_)); + sessions.at(i)->close(); + log()->debug("Closed connection {}", i); } - - CommitResult StorageImpl::commit( - std::unique_ptr mutable_storage) { - auto old_height = block_store_->size(); - return 
std::move(*mutable_storage).commit(*block_store_) | - [this, old_height](auto commit_result) -> CommitResult { - ledger_state_ = commit_result.ledger_state; - auto new_height = block_store_->size(); - for (auto height = old_height + 1; height <= new_height; ++height) { - auto maybe_block = block_store_->fetch(height); - if (not maybe_block) { - return fmt::format("Failed to fetch block {}", height); - } - notifier_.get_subscriber().on_next(*std::move(maybe_block)); - } - return expected::makeValue(std::move(commit_result.ledger_state)); - }; + sessions.clear(); + connection_.reset(); + } + + expected::Result, std::string> + StorageImpl::create( + const ametsuchi::PostgresOptions &postgres_options, + std::shared_ptr pool_wrapper, + std::shared_ptr + perm_converter, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + std::unique_ptr temporary_block_storage_factory, + std::shared_ptr persistent_block_storage, + std::optional> vm_caller_ref, + std::function)> + callback, + logger::LoggerManagerTreePtr log_manager, + size_t pool_size) { + boost::optional> ledger_state; + { + soci::session sql{*pool_wrapper->connection_pool_}; + PostgresWsvQuery wsv_query( + sql, log_manager->getChild("WsvQuery")->getLogger()); + + auto maybe_top_block_info = wsv_query.getTopBlockInfo(); + auto maybe_ledger_peers = wsv_query.getPeers(); + + if (expected::hasValue(maybe_top_block_info) and maybe_ledger_peers) + ledger_state = std::make_shared( + std::move(*maybe_ledger_peers), + maybe_top_block_info.assumeValue().height, + maybe_top_block_info.assumeValue().top_hash); } - bool StorageImpl::preparedCommitEnabled() const { - return prepared_blocks_enabled_ and block_is_prepared_; + return expected::makeValue(std::shared_ptr( + new StorageImpl(std::move(ledger_state), + std::move(postgres_options), + std::move(persistent_block_storage), + std::move(pool_wrapper), + perm_converter, + std::move(pending_txs_storage), + std::move(query_response_factory), + 
std::move(temporary_block_storage_factory), + pool_size, + std::move(vm_caller_ref), + std::move(callback), + std::move(log_manager)))); + } + + CommitResult StorageImpl::commitPrepared( + std::shared_ptr block) { + std::shared_lock lock(drop_mutex_); + if (not connection_) { + std::string msg( + "commitPrepared: connection to database is not initialised"); + return expected::makeError(std::move(msg)); } - CommitResult StorageImpl::commitPrepared( - std::shared_ptr block) { - if (not prepared_blocks_enabled_) { - return expected::makeError( - std::string{"prepared blocks are not enabled"}); - } + soci::session sql(*connection_); + PostgresDbTransaction db_context(sql); - if (not block_is_prepared_) { - return expected::makeError("there are no prepared blocks"); - } + PostgresWsvCommand wsv_command{sql}; + PostgresWsvQuery wsv_query( + sql, this->logManager()->getChild("WsvQuery")->getLogger()); + auto indexer = std::make_unique(sql); - log_->info("applying prepared block"); - - try { - std::shared_lock lock(drop_mutex_); - if (not connection_) { - std::string msg( - "commitPrepared: connection to database is not initialised"); - return expected::makeError(std::move(msg)); - } - - if (not block_store_->insert(block)) { - return fmt::format("Failed to insert block {}", *block); - } - - soci::session sql(*connection_); - sql << "COMMIT PREPARED '" + prepared_block_name_ + "';"; - PostgresBlockIndex block_index( - std::make_unique(sql), - log_manager_->getChild("BlockIndex")->getLogger()); - block_index.index(*block); - block_is_prepared_ = false; - - if (auto e = expected::resultToOptionalError( - PostgresWsvCommand{sql}.setTopBlockInfo( - TopBlockInfo{block->height(), block->hash()}))) { - throw std::runtime_error(e.value()); - } - - notifier_.get_subscriber().on_next(block); - - decltype(std::declval().getPeers()) opt_ledger_peers; - { - auto peer_query = PostgresWsvQuery( - sql, this->log_manager_->getChild("WsvQuery")->getLogger()); - if (not(opt_ledger_peers = 
peer_query.getPeers())) { - return expected::makeError( - std::string{"Failed to get ledger peers! Will retry."}); - } - } - assert(opt_ledger_peers); - - ledger_state_ = std::make_shared( - std::move(*opt_ledger_peers), block->height(), block->hash()); - return expected::makeValue(ledger_state_.value()); - } catch (const std::exception &e) { - std::string msg((boost::format("failed to apply prepared block %s: %s") - % block->hash().hex() % e.what()) - .str()); - return expected::makeError(msg); - } - } - - std::shared_ptr StorageImpl::getWsvQuery() const { - std::shared_lock lock(drop_mutex_); - if (not connection_) { - log_->info("getWsvQuery: connection to database is not initialised"); - return nullptr; - } - return std::make_shared( - std::make_unique(*connection_), - log_manager_->getChild("WsvQuery")->getLogger()); - } + return StorageBase::commitPreparedImpl( + block, db_context, wsv_command, wsv_query, std::move(indexer)); + } - std::shared_ptr StorageImpl::getBlockQuery() const { - std::shared_lock lock(drop_mutex_); - if (not connection_) { - log_->info("getBlockQuery: connection to database is not initialised"); - return nullptr; - } - return std::make_shared( - std::make_unique(*connection_), - *block_store_, - log_manager_->getChild("PostgresBlockQuery")->getLogger()); + std::shared_ptr StorageImpl::getWsvQuery() const { + std::shared_lock lock(drop_mutex_); + if (not connection_) { + log()->info("getWsvQuery: connection to database is not initialised"); + return nullptr; } - - boost::optional> - StorageImpl::createSettingQuery() const { - std::shared_lock lock(drop_mutex_); - if (not connection_) { - log_->info( - "getSettingQuery: connection to database is not initialised"); - return boost::none; - } - std::unique_ptr setting_query_ptr = - std::make_unique( - std::make_unique(*connection_), - log_manager_->getChild("PostgresSettingQuery")->getLogger()); - return boost::make_optional(std::move(setting_query_ptr)); + return std::make_shared( + 
std::make_unique(*connection_), + logManager()->getChild("WsvQuery")->getLogger()); + } + + std::shared_ptr StorageImpl::getBlockQuery() const { + std::shared_lock lock(drop_mutex_); + if (not connection_) { + log()->info("getBlockQuery: connection to database is not initialised"); + return nullptr; } - - rxcpp::observable> - StorageImpl::on_commit() { - return notifier_.get_observable(); + return std::make_shared( + std::make_unique(*connection_), + *blockStore(), + logManager()->getChild("PostgresBlockQuery")->getLogger()); + } + + boost::optional> + StorageImpl::createSettingQuery() const { + std::shared_lock lock(drop_mutex_); + if (not connection_) { + log()->info("getSettingQuery: connection to database is not initialised"); + return boost::none; } - - void StorageImpl::prepareBlock(std::unique_ptr wsv) { - auto &wsv_impl = static_cast(*wsv); - if (not prepared_blocks_enabled_) { - log_->warn("prepared blocks are not enabled"); - return; - } - if (block_is_prepared_) { - log_->warn( - "Refusing to add new prepared state, because there already is one. 
" - "Multiple prepared states are not yet supported."); + std::unique_ptr setting_query_ptr = + std::make_unique( + std::make_unique(*connection_), + logManager()->getChild("PostgresSettingQuery")->getLogger()); + return boost::make_optional(std::move(setting_query_ptr)); + } + + void StorageImpl::prepareBlock(std::unique_ptr wsv) { + auto &wsv_impl = static_cast(*wsv); + PostgresDbTransaction db_context(wsv_impl.getSession()); + StorageBase::prepareBlockImpl(std::move(wsv), db_context); + } + + StorageImpl::~StorageImpl() { + freeConnections(); + } + + void StorageImpl::tryRollback(soci::session &session) { + // TODO 17.06.2019 luckychess IR-568 split connection and schema + // initialisation + if (blockIsPrepared()) { + auto result = + PgConnectionInit::rollbackPrepared(session, prepared_block_name_); + if (iroha::expected::hasError(result)) { + log()->info("Block rollback error: {}", result.assumeError()); } else { - soci::session &sql = wsv_impl.sql_; - try { - sql << "PREPARE TRANSACTION '" + prepared_block_name_ + "';"; - block_is_prepared_ = true; - } catch (const std::exception &e) { - log_->warn("failed to prepare state: {}", e.what()); - } - - log_->info("state prepared successfully"); - } - } - - StorageImpl::~StorageImpl() { - notifier_lifetime_.unsubscribe(); - freeConnections(); - } - - StorageImpl::StoreBlockResult StorageImpl::storeBlock( - std::shared_ptr block) { - if (block_store_->insert(block)) { - notifier_.get_subscriber().on_next(block); - return {}; - } - return expected::makeError("Block insertion to storage failed"); - } - - void StorageImpl::tryRollback(soci::session &session) { - // TODO 17.06.2019 luckychess IR-568 split connection and schema - // initialisation - if (block_is_prepared_) { - PgConnectionInit::rollbackPrepared(session, prepared_block_name_) - .match([this](auto &&v) { block_is_prepared_ = false; }, - [this](auto &&e) { - log_->info("Block rollback error: {}", - std::move(e.error)); - }); + blockIsPrepared() = false; } } 
+ } - } // namespace ametsuchi -} // namespace iroha +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/storage_impl.hpp b/irohad/ametsuchi/impl/storage_impl.hpp index 800829f10f1..779c38e05cb 100644 --- a/irohad/ametsuchi/impl/storage_impl.hpp +++ b/irohad/ametsuchi/impl/storage_impl.hpp @@ -6,22 +6,9 @@ #ifndef IROHA_STORAGE_IMPL_HPP #define IROHA_STORAGE_IMPL_HPP -#include "ametsuchi/storage.hpp" - -#include -#include +#include "ametsuchi/impl/storage_base.hpp" #include -#include -#include -#include "ametsuchi/block_storage_factory.hpp" -#include "ametsuchi/impl/pool_wrapper.hpp" -#include "ametsuchi/key_value_storage.hpp" -#include "ametsuchi/ledger_state.hpp" -#include "ametsuchi/reconnection_strategy.hpp" -#include "interfaces/permission_to_string.hpp" -#include "logger/logger_fwd.hpp" -#include "logger/logger_manager_fwd.hpp" namespace shared_model { namespace interface { @@ -38,7 +25,7 @@ namespace iroha { class PostgresOptions; class VmCaller; - class StorageImpl : public Storage { + class StorageImpl final : public StorageBase { public: static expected::Result, std::string> create( const PostgresOptions &postgres_options, @@ -51,6 +38,8 @@ namespace iroha { std::unique_ptr temporary_block_storage_factory, std::shared_ptr persistent_block_storage, std::optional> vm_caller_ref, + std::function)> callback, logger::LoggerManagerTreePtr log_manager, size_t pool_size = 10); @@ -60,16 +49,6 @@ namespace iroha { std::unique_ptr createTemporaryWsv( std::shared_ptr command_executor) override; - iroha::expected::Result, std::string> - createMutableStorage( - std::shared_ptr command_executor) override; - - boost::optional> createPeerQuery() - const override; - - boost::optional> createBlockQuery() - const override; - boost::optional> createSettingQuery() const override; @@ -79,9 +58,6 @@ namespace iroha { std::shared_ptr response_factory) const override; - iroha::expected::Result insertBlock( - std::shared_ptr block) override; - expected::Result 
insertPeer( const shared_model::interface::Peer &peer) override; @@ -89,20 +65,14 @@ namespace iroha { createMutableStorage(std::shared_ptr command_executor, BlockStorageFactory &storage_factory) override; - void resetPeers() override; - - expected::Result dropBlockStorage() override; + expected::Result, std::string> + createMutableStorage( + std::shared_ptr command_executor) override; - boost::optional> - getLedgerState() const override; + expected::Result resetPeers() override; void freeConnections() override; - CommitResult commit( - std::unique_ptr mutable_storage) override; - - bool preparedCommitEnabled() const override; - CommitResult commitPrepared( std::shared_ptr block) override; @@ -110,9 +80,6 @@ namespace iroha { std::shared_ptr getBlockQuery() const override; - rxcpp::observable> - on_commit() override; - void prepareBlock(std::unique_ptr wsv) override; ~StorageImpl() override; @@ -132,62 +99,24 @@ namespace iroha { std::unique_ptr temporary_block_storage_factory, size_t pool_size, std::optional> vm_caller, + std::function)> callback, logger::LoggerManagerTreePtr log_manager); private: - using StoreBlockResult = iroha::expected::Result; - friend class ::iroha::ametsuchi::AmetsuchiTest; - /** - * add block to block storage - */ - StoreBlockResult storeBlock( - std::shared_ptr block); - /** * Method tries to perform rollback on passed session */ void tryRollback(soci::session &session); - std::shared_ptr block_store_; - - std::shared_ptr pool_wrapper_; - /// ref for pool_wrapper_::connection_pool_ + std::shared_ptr pool_wrapper_; std::shared_ptr &connection_; - - rxcpp::composite_subscription notifier_lifetime_; - rxcpp::subjects::subject< - std::shared_ptr> - notifier_; - - std::shared_ptr - perm_converter_; - - std::shared_ptr pending_txs_storage_; - - std::shared_ptr - query_response_factory_; - - std::unique_ptr temporary_block_storage_factory_; - - std::optional> vm_caller_ref_; - - logger::LoggerManagerTreePtr log_manager_; - logger::LoggerPtr 
log_; - mutable std::shared_timed_mutex drop_mutex_; - const size_t pool_size_; - - bool prepared_blocks_enabled_; - - std::atomic block_is_prepared_; - std::string prepared_block_name_; - - boost::optional> ledger_state_; }; } // namespace ametsuchi } // namespace iroha diff --git a/irohad/ametsuchi/impl/temporary_wsv_impl.cpp b/irohad/ametsuchi/impl/temporary_wsv_impl.cpp index c81c61c8772..fd03c4a8b0f 100644 --- a/irohad/ametsuchi/impl/temporary_wsv_impl.cpp +++ b/irohad/ametsuchi/impl/temporary_wsv_impl.cpp @@ -5,9 +5,6 @@ #include "ametsuchi/impl/temporary_wsv_impl.hpp" -#include -#include -#include #include "ametsuchi/impl/postgres_command_executor.hpp" #include "ametsuchi/tx_executor.hpp" #include "interfaces/commands/command.hpp" @@ -16,135 +13,85 @@ #include "logger/logger.hpp" #include "logger/logger_manager.hpp" -namespace iroha { - namespace ametsuchi { - TemporaryWsvImpl::TemporaryWsvImpl( - std::shared_ptr command_executor, - logger::LoggerManagerTreePtr log_manager) - : sql_(command_executor->getSession()), - transaction_executor_(std::make_unique( - std::move(command_executor))), - log_manager_(std::move(log_manager)), - log_(log_manager_->getLogger()) { - sql_ << "BEGIN"; - } - - expected::Result - TemporaryWsvImpl::validateSignatures( - const shared_model::interface::Transaction &transaction) { - auto keys_range = transaction.signatures() - | boost::adaptors::transformed( - [](const auto &s) { return s.publicKey(); }); - auto keys = boost::algorithm::join(keys_range, "'), ('"); - // not using bool since it is not supported by SOCI - boost::optional signatories_valid; +namespace iroha::ametsuchi { + TemporaryWsvImpl::TemporaryWsvImpl( + std::shared_ptr command_executor, + logger::LoggerManagerTreePtr log_manager) + : tx_(command_executor->dbSession()), + transaction_executor_( + std::make_unique(std::move(command_executor))), + log_manager_(std::move(log_manager)), + log_(log_manager_->getLogger()) { + tx_.begin(); + } - boost::format 
query(R"(SELECT sum(count) = :signatures_count - AND sum(quorum) <= :signatures_count - FROM - (SELECT count(public_key) - FROM ( VALUES ('%s') ) AS CTE1(public_key) - WHERE lower(public_key) IN - (SELECT public_key - FROM account_has_signatory - WHERE account_id = :account_id ) ) AS CTE2(count), - (SELECT quorum - FROM account - WHERE account_id = :account_id) AS CTE3(quorum))"); - - try { - auto keys_range_size = boost::size(keys_range); - sql_ << (query % keys).str(), soci::into(signatories_valid), - soci::use(keys_range_size, "signatures_count"), - soci::use(transaction.creatorAccountId(), "account_id"); - } catch (const std::exception &e) { - auto error_str = "Transaction " + transaction.toString() - + " failed signatures validation with db error: " + e.what(); - // TODO [IR-1816] Akvinikym 29.10.18: substitute error code magic number - // with named constant - return expected::makeError(validation::CommandError{ - "signatures validation", 1, error_str, false}); + expected::Result TemporaryWsvImpl::apply( + const shared_model::interface::Transaction &transaction) { + auto savepoint_wrapper = createSavepoint("savepoint_temp_wsv"); + return validateSignatures(transaction) | + [this, savepoint = std::move(savepoint_wrapper), &transaction]() + -> expected::Result { + if (auto error = expected::resultToOptionalError( + transaction_executor_->execute(transaction, true))) { + return expected::makeError( + validation::CommandError{error->command_error.command_name, + error->command_error.error_code, + error->command_error.error_extra, + true, + error->command_index}); } + // success + savepoint->release(); + return {}; + }; + } - if (signatories_valid and *signatories_valid) { - return {}; - } else { - auto error_str = "Transaction " + transaction.toString() - + " failed signatures validation"; - // TODO [IR-1816] Akvinikym 29.10.18: substitute error code magic number - // with named constant - return expected::makeError(validation::CommandError{ - "signatures 
validation", 2, error_str, false}); - } - } - - expected::Result TemporaryWsvImpl::apply( - const shared_model::interface::Transaction &transaction) { - auto savepoint_wrapper = createSavepoint("savepoint_temp_wsv"); + std::unique_ptr + TemporaryWsvImpl::createSavepoint(const std::string &name) { + return std::make_unique( + SavepointWrapperImpl( + tx_, + name, + log_manager_->getChild("SavepointWrapper")->getLogger())); + } - return validateSignatures(transaction) | - [this, - savepoint = std::move(savepoint_wrapper), - &transaction]() - -> expected::Result { - if (auto error = expected::resultToOptionalError( - transaction_executor_->execute(transaction, true))) { - return expected::makeError( - validation::CommandError{error->command_error.command_name, - error->command_error.error_code, - error->command_error.error_extra, - true, - error->command_index}); - } - // success - savepoint->release(); - return {}; - }; + TemporaryWsvImpl::~TemporaryWsvImpl() { + try { + tx_.rollback(); + } catch (std::exception &e) { + log_->error("Rollback did not happen: {}", e.what()); } + } - std::unique_ptr - TemporaryWsvImpl::createSavepoint(const std::string &name) { - return std::make_unique( - SavepointWrapperImpl( - *this, - name, - log_manager_->getChild("SavepointWrapper")->getLogger())); - } + DatabaseTransaction &TemporaryWsvImpl::getDbTransaction() { + return tx_; + } - TemporaryWsvImpl::~TemporaryWsvImpl() { - try { - sql_ << "ROLLBACK"; - } catch (std::exception &e) { - log_->error("Rollback did not happen: {}", e.what()); - } - } + TemporaryWsvImpl::SavepointWrapperImpl::SavepointWrapperImpl( + DatabaseTransaction &tx, + std::string savepoint_name, + logger::LoggerPtr log) + : tx_(tx), + is_released_{false}, + log_(std::move(log)), + savepoint_name_(std::move(savepoint_name)) { + tx_.savepoint(savepoint_name_); + } - TemporaryWsvImpl::SavepointWrapperImpl::SavepointWrapperImpl( - const iroha::ametsuchi::TemporaryWsvImpl &wsv, - std::string savepoint_name, - 
logger::LoggerPtr log) - : sql_{wsv.sql_}, - savepoint_name_{std::move(savepoint_name)}, - is_released_{false}, - log_(std::move(log)) { - sql_ << "SAVEPOINT " + savepoint_name_ + ";"; - } + void TemporaryWsvImpl::SavepointWrapperImpl::release() { + is_released_ = true; + } - void TemporaryWsvImpl::SavepointWrapperImpl::release() { - is_released_ = true; - } - - TemporaryWsvImpl::SavepointWrapperImpl::~SavepointWrapperImpl() { - try { - if (not is_released_) { - sql_ << "ROLLBACK TO SAVEPOINT " + savepoint_name_ + ";"; - } else { - sql_ << "RELEASE SAVEPOINT " + savepoint_name_ + ";"; - } - } catch (std::exception &e) { - log_->error("SQL error. Reason: {}", e.what()); + TemporaryWsvImpl::SavepointWrapperImpl::~SavepointWrapperImpl() { + try { + if (not is_released_) { + tx_.rollbackToSavepoint(savepoint_name_); + } else { + tx_.releaseSavepoint(savepoint_name_); } + } catch (std::exception &e) { + log_->error("SQL error. Reason: {}", e.what()); } + } - } // namespace ametsuchi -} // namespace iroha +} // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/temporary_wsv_impl.hpp b/irohad/ametsuchi/impl/temporary_wsv_impl.hpp index badacdfc591..3da2d7441f0 100644 --- a/irohad/ametsuchi/impl/temporary_wsv_impl.hpp +++ b/irohad/ametsuchi/impl/temporary_wsv_impl.hpp @@ -8,8 +8,8 @@ #include "ametsuchi/temporary_wsv.hpp" -#include #include "ametsuchi/command_executor.hpp" +#include "ametsuchi/impl/db_transaction.hpp" #include "logger/logger_fwd.hpp" #include "logger/logger_manager_fwd.hpp" @@ -19,59 +19,55 @@ namespace shared_model { } } // namespace shared_model -namespace iroha { +namespace iroha::ametsuchi { + class TransactionExecutor; - namespace ametsuchi { - class PostgresCommandExecutor; - class TransactionExecutor; + class TemporaryWsvImpl : public TemporaryWsv { + friend class StorageImpl; - class TemporaryWsvImpl : public TemporaryWsv { - friend class StorageImpl; + public: + struct SavepointWrapperImpl final : public TemporaryWsv::SavepointWrapper 
{ + SavepointWrapperImpl(DatabaseTransaction &tx, + std::string savepoint_name, + logger::LoggerPtr log); + ~SavepointWrapperImpl() override; - public: - struct SavepointWrapperImpl : public TemporaryWsv::SavepointWrapper { - SavepointWrapperImpl(const TemporaryWsvImpl &wsv, - std::string savepoint_name, - logger::LoggerPtr log); + void release() override; - void release() override; + private: + DatabaseTransaction &tx_; + bool is_released_; + logger::LoggerPtr log_; + std::string savepoint_name_; + }; - ~SavepointWrapperImpl() override; + TemporaryWsvImpl(std::shared_ptr command_executor, + logger::LoggerManagerTreePtr log_manager); - private: - soci::session &sql_; - std::string savepoint_name_; - bool is_released_; - logger::LoggerPtr log_; - }; + expected::Result apply( + const shared_model::interface::Transaction &transaction) override; - TemporaryWsvImpl( - std::shared_ptr command_executor, - logger::LoggerManagerTreePtr log_manager); + std::unique_ptr createSavepoint( + const std::string &name) override; - expected::Result apply( - const shared_model::interface::Transaction &transaction) override; + ~TemporaryWsvImpl() override; - std::unique_ptr createSavepoint( - const std::string &name) override; + DatabaseTransaction &getDbTransaction() override; - ~TemporaryWsvImpl() override; + protected: + /** + * Verifies whether transaction has at least quorum signatures and they + * are a subset of creator account signatories + */ + virtual expected::Result validateSignatures( + const shared_model::interface::Transaction &transaction) = 0; - private: - /** - * Verifies whether transaction has at least quorum signatures and they - * are a subset of creator account signatories - */ - expected::Result validateSignatures( - const shared_model::interface::Transaction &transaction); + DatabaseTransaction &tx_; + std::unique_ptr transaction_executor_; + logger::LoggerManagerTreePtr log_manager_; + logger::LoggerPtr log_; + }; - soci::session &sql_; - std::unique_ptr 
transaction_executor_; - - logger::LoggerManagerTreePtr log_manager_; - logger::LoggerPtr log_; - }; - } // namespace ametsuchi -} // namespace iroha +} // namespace iroha::ametsuchi #endif // IROHA_TEMPORARY_WSV_IMPL_HPP diff --git a/irohad/ametsuchi/impl/wsv_restorer_impl.cpp b/irohad/ametsuchi/impl/wsv_restorer_impl.cpp index 36627ab8f93..c15a4d292aa 100644 --- a/irohad/ametsuchi/impl/wsv_restorer_impl.cpp +++ b/irohad/ametsuchi/impl/wsv_restorer_impl.cpp @@ -5,7 +5,9 @@ #include "wsv_restorer_impl.hpp" -#include +#include +#include + #include "ametsuchi/block_query.hpp" #include "ametsuchi/block_storage.hpp" #include "ametsuchi/block_storage_factory.hpp" @@ -23,6 +25,14 @@ using shared_model::interface::types::HeightType; namespace { + using namespace std::chrono_literals; + + /** + * Time to wait for new block in blockstore for wait-for-new-blocks restore + * mode + */ + static constexpr std::chrono::milliseconds kWaitForBlockTime = 5000ms; + /** * Stub implementation used to restore WSV. 
Check the method descriptions for * details @@ -51,6 +61,8 @@ namespace { return 0; } + void reload() override {} + void clear() override {} /** @@ -98,55 +110,38 @@ namespace { iroha::validation::ChainValidator &validator, HeightType starting_height, HeightType ending_height) { - auto blocks = rxcpp::observable<>::create< - std::shared_ptr>([&block_query, - &interface_validator, - &proto_validator, - starting_height, - ending_height]( - auto s) { - for (auto height = starting_height; height <= ending_height; ++height) { - auto result = block_query.getBlock(height); - if (auto e = iroha::expected::resultToOptionalError(result)) { - s.on_error(std::make_exception_ptr( - std::runtime_error(std::move(e).value().message))); - return; - } + for (auto height = starting_height; height <= ending_height; ++height) { + auto result = block_query.getBlock(height); + if (hasError(result)) { + return std::move(result.assumeError().message); + } - auto block = std::move(result).assumeValue(); - if (height != block->height()) { - s.on_error(std::make_exception_ptr(std::runtime_error( - "inconsistent block height in block storage"))); - return; - } + auto block = std::move(result).assumeValue(); + if (height != block->height()) { + return iroha::expected::makeError( + "inconsistent block height in block storage"); + } - // do not validate genesis block - transactions may not have creators, - // block is not signed - if (height != 1) { - if (auto error = proto_validator.validate( - static_cast(block.get()) - ->getTransport())) { - s.on_error( - std::make_exception_ptr(std::runtime_error(error->toString()))); - return; - } + // do not validate genesis block - transactions may not have creators, + // block is not signed + if (height != 1) { + if (auto error = proto_validator.validate( + static_cast(block.get()) + ->getTransport())) { + return iroha::expected::makeError(error->toString()); + } - if (auto error = interface_validator.validate(*block)) { - s.on_error( - 
std::make_exception_ptr(std::runtime_error(error->toString()))); - return; - } + if (auto error = interface_validator.validate(*block)) { + return iroha::expected::makeError(error->toString()); } + } - s.on_next(std::move(block)); + if (not validator.validateAndApply(std::move(block), *mutable_storage)) { + return iroha::expected::makeError("Cannot validate and apply blocks!"); } - s.on_completed(); - }); - if (validator.validateAndApply(blocks, *mutable_storage)) { - return storage.commit(std::move(mutable_storage)); - } else { - return iroha::expected::makeError("Cannot validate and apply blocks!"); } + + return storage.commit(std::move(mutable_storage)); } } // namespace @@ -156,83 +151,118 @@ namespace iroha::ametsuchi { shared_model::interface::Block>> interface_validator, std::unique_ptr> proto_validator, - std::shared_ptr validator) + std::shared_ptr validator, + logger::LoggerPtr log) : interface_validator_{std::move(interface_validator)}, proto_validator_{std::move(proto_validator)}, - validator_{std::move(validator)} {} + validator_{std::move(validator)}, + log_{std::move(log)} {} - CommitResult WsvRestorerImpl::restoreWsv(Storage &storage) { + CommitResult WsvRestorerImpl::restoreWsv(Storage &storage, + bool wait_for_new_blocks) { return storage.createCommandExecutor() | - [this, &storage](auto &&command_executor) -> CommitResult { + [this, &storage, wait_for_new_blocks]( + std::shared_ptr command_executor) + -> CommitResult { BlockStorageStubFactory storage_factory; - return storage.createMutableStorage(std::move(command_executor), - storage_factory) - | [this, &storage](auto &&mutable_storage) -> CommitResult { - auto block_query = storage.getBlockQuery(); - if (not block_query) { - return expected::makeError("Cannot create BlockQuery"); - } + CommitResult res; + auto block_query = storage.getBlockQuery(); + auto last_block_in_storage = block_query->getTopBlockHeight(); - const auto last_block_in_storage = block_query->getTopBlockHeight(); - const auto 
wsv_ledger_state = storage.getLedgerState(); - - shared_model::interface::types::HeightType wsv_ledger_height; - if (wsv_ledger_state) { - const auto &wsv_top_block_info = - wsv_ledger_state.value()->top_block_info; - wsv_ledger_height = wsv_top_block_info.height; - if (wsv_ledger_height > last_block_in_storage) { - return fmt::format( - "WSV state (height {}) is more recent " - "than block storage (height {}).", - wsv_ledger_height, - last_block_in_storage); + do { + res = storage.createMutableStorage(command_executor, storage_factory) | + [this, &storage, &block_query, &last_block_in_storage]( + auto &&mutable_storage) -> CommitResult { + if (not block_query) { + return expected::makeError("Cannot create BlockQuery"); } - // check that a block with that height is present in the block - // storage and that its hash matches - auto check_top_block = - block_query->getBlock(wsv_top_block_info.height) - .match( - [&wsv_top_block_info]( - const auto &block_from_block_storage) - -> expected::Result { - if (block_from_block_storage.value->hash() - != wsv_top_block_info.top_hash) { - return fmt::format( - "The hash of block applied to WSV ({}) " - "does not match the hash of the block " - "from block storage ({}).", - wsv_top_block_info.top_hash, - block_from_block_storage.value->hash()); - } - return expected::Value{}; - }, - [](expected::Error &&error) - -> expected::Result { - return std::move(error).error.message; - }); - if (auto e = expected::resultToOptionalError(check_top_block)) { - return fmt::format( - "WSV top block (height {}) check failed: {} " - "Please check that WSV matches block storage " - "or avoid reusing WSV.", - wsv_ledger_height, - e.value()); + + const auto wsv_ledger_state = storage.getLedgerState(); + + shared_model::interface::types::HeightType wsv_ledger_height; + if (wsv_ledger_state) { + const auto &wsv_top_block_info = + wsv_ledger_state.value()->top_block_info; + wsv_ledger_height = wsv_top_block_info.height; + if (wsv_ledger_height > 
last_block_in_storage) { + return fmt::format( + "WSV state (height {}) is more recent " + "than block storage (height {}).", + wsv_ledger_height, + last_block_in_storage); + } + // check that a block with that height is present in the block + // storage and that its hash matches + auto check_top_block = + block_query->getBlock(wsv_top_block_info.height) + .match( + [&wsv_top_block_info]( + const auto &block_from_block_storage) + -> expected::Result { + if (block_from_block_storage.value->hash() + != wsv_top_block_info.top_hash) { + return fmt::format( + "The hash of block applied to WSV ({}) " + "does not match the hash of the block " + "from block storage ({}).", + wsv_top_block_info.top_hash, + block_from_block_storage.value->hash()); + } + return expected::Value{}; + }, + [](expected::Error &&error) + -> expected::Result { + return std::move(error).error.message; + }); + if (hasError(check_top_block)) { + return fmt::format( + "WSV top block (height {}) check failed: {} " + "Please check that WSV matches block storage " + "or avoid reusing WSV.", + wsv_ledger_height, + check_top_block.assumeError()); + } + } else { + wsv_ledger_height = 0; } - } else { - wsv_ledger_height = 0; + + return reindexBlocks(storage, + mutable_storage, + *block_query, + *interface_validator_, + *proto_validator_, + *validator_, + wsv_ledger_height + 1, + last_block_in_storage); + }; + if (hasError(res)) { + break; } - return reindexBlocks(storage, - mutable_storage, - *block_query, - *interface_validator_, - *proto_validator_, - *validator_, - wsv_ledger_height + 1, - last_block_in_storage); - }; + while (wait_for_new_blocks) { + std::this_thread::sleep_for(kWaitForBlockTime); + block_query->reloadBlockstore(); + auto new_last_block = block_query->getTopBlockHeight(); + + // try to load block to ensure it is written completely + auto block_result = block_query->getBlock(new_last_block); + while (hasError(block_result) + && (new_last_block > last_block_in_storage)) { + 
--new_last_block; + auto block_result = block_query->getBlock(new_last_block); + }; + + if (new_last_block > last_block_in_storage) { + last_block_in_storage = new_last_block; + log_->info("Blockstore has new blocks from {} to {}, restore them.", + last_block_in_storage, + new_last_block); + break; + } + } + } while (wait_for_new_blocks); + return res; }; } } // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/wsv_restorer_impl.hpp b/irohad/ametsuchi/impl/wsv_restorer_impl.hpp index db8201c63a6..6acbc8b2fde 100644 --- a/irohad/ametsuchi/impl/wsv_restorer_impl.hpp +++ b/irohad/ametsuchi/impl/wsv_restorer_impl.hpp @@ -6,10 +6,10 @@ #ifndef IROHA_WSVRESTORERIMPL_HPP #define IROHA_WSVRESTORERIMPL_HPP -#include "ametsuchi/wsv_restorer.hpp" - #include "ametsuchi/ledger_state.hpp" +#include "ametsuchi/wsv_restorer.hpp" #include "common/result.hpp" +#include "logger/logger_fwd.hpp" namespace shared_model { namespace interface { @@ -41,17 +41,21 @@ namespace iroha { shared_model::interface::Block>> interface_validator, std::unique_ptr> proto_validator, - std::shared_ptr validator); + std::shared_ptr validator, + logger::LoggerPtr log); virtual ~WsvRestorerImpl() = default; /** * Recover WSV (World State View). * Drop storage and apply blocks one by one. * @param storage of blocks in ledger + * @param wait_for_new_blocks - flag for wait for new blocks mode. + * Method waits for new blocks in block storage. 
* @return ledger state after restoration on success, otherwise error * string */ - CommitResult restoreWsv(Storage &storage) override; + CommitResult restoreWsv(Storage &storagem, + bool wait_for_new_blocks) override; private: std::unique_ptr> proto_validator_; std::shared_ptr validator_; + logger::LoggerPtr log_; }; } // namespace ametsuchi diff --git a/irohad/ametsuchi/indexer.hpp b/irohad/ametsuchi/indexer.hpp index 5addbe7cf0f..712a13f9c60 100644 --- a/irohad/ametsuchi/indexer.hpp +++ b/irohad/ametsuchi/indexer.hpp @@ -31,11 +31,15 @@ namespace iroha { /// Store a committed tx hash. virtual void committedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, const shared_model::interface::types::HashType &committed_tx_hash) = 0; /// Store a rejected tx hash. virtual void rejectedTxHash( + const TxPosition &position, + shared_model::interface::types::TimestampType const ts, const shared_model::interface::types::HashType &rejected_tx_hash) = 0; /// Index tx info. 
diff --git a/irohad/ametsuchi/key_value_storage.hpp b/irohad/ametsuchi/key_value_storage.hpp index 91d09f6a4b6..8beacb2d03c 100644 --- a/irohad/ametsuchi/key_value_storage.hpp +++ b/irohad/ametsuchi/key_value_storage.hpp @@ -49,6 +49,11 @@ namespace iroha { */ virtual Identifier last_id() const = 0; + /** + * Reloads data in case it was modified externally + */ + virtual void reload() = 0; + virtual void dropAll() = 0; virtual ~KeyValueStorage() = default; diff --git a/irohad/ametsuchi/ledger_state.hpp b/irohad/ametsuchi/ledger_state.hpp index 840fdd2db08..a4dddcc24ff 100644 --- a/irohad/ametsuchi/ledger_state.hpp +++ b/irohad/ametsuchi/ledger_state.hpp @@ -15,6 +15,8 @@ namespace iroha { shared_model::interface::types::HeightType height; shared_model::crypto::Hash top_hash; + TopBlockInfo() {} + TopBlockInfo(shared_model::interface::types::HeightType height, shared_model::crypto::Hash top_hash) : height(height), top_hash(std::move(top_hash)) {} diff --git a/irohad/ametsuchi/mutable_storage.hpp b/irohad/ametsuchi/mutable_storage.hpp index 3d899a58e5b..e3eab29d5d2 100644 --- a/irohad/ametsuchi/mutable_storage.hpp +++ b/irohad/ametsuchi/mutable_storage.hpp @@ -8,7 +8,6 @@ #include -#include #include "ametsuchi/block_storage.hpp" #include "ametsuchi/ledger_state.hpp" #include "common/result.hpp" @@ -56,16 +55,15 @@ namespace iroha { std::shared_ptr block) = 0; /** - * Applies an observable of blocks to current mutable state using logic - * specified in function - * @param blocks Blocks to be applied + * Applies a block to current mutable state using logic specified in + * function + * @param block Block to be applied * @param predicate Checks whether block is applicable prior to applying * transactions - * @return True if blocks were successfully applied, false otherwise. + * @return True if block was successfully applied, false otherwise. 
*/ - virtual bool apply( - rxcpp::observable> - blocks, + virtual bool applyIf( + std::shared_ptr block, MutableStoragePredicate predicate) = 0; /// Apply the local changes made to this MutableStorage to block_storage diff --git a/irohad/ametsuchi/storage.hpp b/irohad/ametsuchi/storage.hpp index b9a04c10512..a9c716389a2 100644 --- a/irohad/ametsuchi/storage.hpp +++ b/irohad/ametsuchi/storage.hpp @@ -8,7 +8,6 @@ #include -#include #include "ametsuchi/block_query_factory.hpp" #include "ametsuchi/mutable_factory.hpp" #include "ametsuchi/peer_query_factory.hpp" @@ -80,18 +79,10 @@ namespace iroha { createMutableStorage(std::shared_ptr command_executor, BlockStorageFactory &storage_factory) = 0; - /** - * method called when block is written to the storage - * @return observable with the Block committed - */ - virtual rxcpp::observable< - std::shared_ptr> - on_commit() = 0; - /** * Removes all peers from WSV */ - virtual void resetPeers() = 0; + virtual expected::Result resetPeers() = 0; /** * Remove all blocks from block storage. 
diff --git a/irohad/ametsuchi/temporary_wsv.hpp b/irohad/ametsuchi/temporary_wsv.hpp index 45a59269f7b..d14aedbc9fe 100644 --- a/irohad/ametsuchi/temporary_wsv.hpp +++ b/irohad/ametsuchi/temporary_wsv.hpp @@ -8,6 +8,7 @@ #include +#include "ametsuchi/impl/db_transaction.hpp" #include "common/result.hpp" #include "validation/stateful_validator_common.hpp" @@ -55,6 +56,8 @@ namespace iroha { virtual std::unique_ptr createSavepoint( const std::string &name) = 0; + virtual DatabaseTransaction &getDbTransaction() = 0; + virtual ~TemporaryWsv() = default; }; } // namespace ametsuchi diff --git a/irohad/ametsuchi/wsv_query.hpp b/irohad/ametsuchi/wsv_query.hpp index bc990996e41..84985b55d45 100644 --- a/irohad/ametsuchi/wsv_query.hpp +++ b/irohad/ametsuchi/wsv_query.hpp @@ -6,9 +6,9 @@ #ifndef IROHA_WSV_QUERY_HPP #define IROHA_WSV_QUERY_HPP +#include #include -#include #include "common/result.hpp" #include "interfaces/common_objects/peer.hpp" #include "interfaces/common_objects/string_view_types.hpp" @@ -40,6 +40,35 @@ namespace iroha { std::vector>> getPeers() = 0; + // ToDo?(kuvaldini,iceseer) #997 + // /** + // * @brief Fetch domains stored in ledger + // * @return list of domains in insertion to ledger order + // */ + // virtual iroha::expected::Result< + // std::vector>, + // std::string> + // getDomains() = 0; + + /** + * @brief Fetch number of domains in ledger + * @return number of domains in ledger + */ + virtual iroha::expected::Result countPeers() = 0; + + /** + * @brief Fetch number of domains in ledger + * @return number of domains in ledger + */ + virtual iroha::expected::Result countDomains() = 0; + + /** + * @brief Fetch number of valid transactions in ledger + * @return number of transactions in ledger + */ + virtual iroha::expected::Result + countTransactions() = 0; + /** * Fetch peer with given public key from ledger * @return the peer if found, none otherwise diff --git a/irohad/ametsuchi/wsv_restorer.hpp b/irohad/ametsuchi/wsv_restorer.hpp index 
5bc2e93c59f..d643053fb34 100644 --- a/irohad/ametsuchi/wsv_restorer.hpp +++ b/irohad/ametsuchi/wsv_restorer.hpp @@ -22,10 +22,13 @@ namespace iroha { /** * Recover WSV (World State View). * @param storage storage of blocks in ledger + * @param wait_for_new_blocks - flag for wait for new blocks mode. + * Method waits for new blocks in block storage. * @return ledger state after restoration on success, otherwise error * string */ - virtual CommitResult restoreWsv(Storage &storage) = 0; + virtual CommitResult restoreWsv(Storage &storage, + bool wait_for_new_blocks) = 0; }; } // namespace ametsuchi diff --git a/irohad/consensus/CMakeLists.txt b/irohad/consensus/CMakeLists.txt index 4cffde346d1..9669e74f93c 100644 --- a/irohad/consensus/CMakeLists.txt +++ b/irohad/consensus/CMakeLists.txt @@ -10,10 +10,3 @@ target_link_libraries(consensus_round Boost::boost shared_model_utils ) - -add_library(gate_object - impl/gate_object.cpp - ) -target_link_libraries(gate_object - Boost::boost - ) diff --git a/irohad/consensus/gate_object.hpp b/irohad/consensus/gate_object.hpp index 61db06b3933..de6b43c9868 100644 --- a/irohad/consensus/gate_object.hpp +++ b/irohad/consensus/gate_object.hpp @@ -6,7 +6,8 @@ #ifndef CONSENSUS_GATE_OBJECT_HPP #define CONSENSUS_GATE_OBJECT_HPP -#include +#include + #include "ametsuchi/ledger_state.hpp" #include "consensus/round.hpp" #include "cryptography/hash.hpp" @@ -32,11 +33,11 @@ namespace iroha { /// Current pair is valid struct PairValid : public BaseGateObject { - std::shared_ptr block; + std::shared_ptr block; PairValid(consensus::Round round, std::shared_ptr ledger_state, - std::shared_ptr block) + std::shared_ptr block) : BaseGateObject(std::move(round), std::move(ledger_state)), block(std::move(block)) {} }; @@ -87,21 +88,14 @@ namespace iroha { using Synchronizable::Synchronizable; }; - using GateObject = boost::variant; + using GateObject = std::variant; } // namespace consensus } // namespace iroha -extern template class boost::variant; - 
#endif // CONSENSUS_GATE_OBJECT_HPP diff --git a/irohad/consensus/impl/gate_object.cpp b/irohad/consensus/impl/gate_object.cpp deleted file mode 100644 index 0ae0a6cd67b..00000000000 --- a/irohad/consensus/impl/gate_object.cpp +++ /dev/null @@ -1,17 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "consensus/gate_object.hpp" - -using GateObject = iroha::consensus::GateObject; - -template GateObject::~variant(); -template GateObject::variant(GateObject &&) noexcept; -template GateObject::variant(const GateObject &); -template void GateObject::destroy_content() noexcept; -template int GateObject::which() const noexcept; -template void GateObject::indicate_which(int) noexcept; -template bool GateObject::using_backup() const noexcept; -template GateObject::convert_copy_into::convert_copy_into(void *) noexcept; diff --git a/irohad/consensus/impl/round.cpp b/irohad/consensus/impl/round.cpp index 000e731aca9..a40d834a974 100644 --- a/irohad/consensus/impl/round.cpp +++ b/irohad/consensus/impl/round.cpp @@ -31,13 +31,6 @@ namespace iroha { return not(*this == rhs); } - std::size_t RoundTypeHasher::operator()(const consensus::Round &val) const { - size_t seed = 0; - boost::hash_combine(seed, val.block_round); - boost::hash_combine(seed, val.reject_round); - return seed; - } - std::string Round::toString() const { return shared_model::detail::PrettyStringBuilder() .init("Round") @@ -45,5 +38,19 @@ namespace iroha { .appendNamed("reject", reject_round) .finalize(); } + + std::size_t hash_value(Round const &val) { + size_t seed = 0; + boost::hash_combine(seed, val.block_round); + boost::hash_combine(seed, val.reject_round); + return seed; + } } // namespace consensus } // namespace iroha + +namespace std { + std::size_t hash::operator()( + iroha::consensus::Round const &val) const noexcept { + return hash_value(val); + } +} // namespace std diff --git a/irohad/consensus/round.hpp 
b/irohad/consensus/round.hpp index dd320c30eb6..839f7551707 100644 --- a/irohad/consensus/round.hpp +++ b/irohad/consensus/round.hpp @@ -45,15 +45,16 @@ namespace iroha { std::string toString() const; }; - /** - * Class provides hash function for Round - */ - class RoundTypeHasher { - public: - std::size_t operator()(const consensus::Round &val) const; - }; + std::size_t hash_value(Round const &val); } // namespace consensus } // namespace iroha +namespace std { + template <> + struct hash { + std::size_t operator()(iroha::consensus::Round const &val) const noexcept; + }; +} // namespace std + #endif // IROHA_ROUND_HPP diff --git a/irohad/consensus/yac/CMakeLists.txt b/irohad/consensus/yac/CMakeLists.txt index e86566984e3..7f3ae7c059d 100644 --- a/irohad/consensus/yac/CMakeLists.txt +++ b/irohad/consensus/yac/CMakeLists.txt @@ -19,7 +19,6 @@ add_library(yac impl/peer_orderer_impl.cpp impl/yac_gate_impl.cpp impl/yac_hash_provider_impl.cpp - impl/consensus_outcome_delay.cpp storage/impl/yac_common.cpp storage/impl/yac_block_storage.cpp @@ -30,12 +29,10 @@ add_library(yac target_link_libraries(yac supermajority_checker common - rxcpp logger logger_manager hash consensus_round - gate_object permutation_generator ) # avoid compilation error due to missing operator<< in Answer variant types @@ -45,6 +42,7 @@ target_compile_definitions(yac add_library(yac_transport transport/impl/network_impl.cpp + transport/impl/consensus_service_impl.cpp impl/yac_crypto_provider_impl.cpp ) target_link_libraries(yac_transport diff --git a/irohad/consensus/yac/cluster_order.hpp b/irohad/consensus/yac/cluster_order.hpp index 5bba23a87f6..86ec3d3f2cf 100644 --- a/irohad/consensus/yac/cluster_order.hpp +++ b/irohad/consensus/yac/cluster_order.hpp @@ -7,80 +7,76 @@ #define IROHA_CLUSTER_ORDER_HPP #include +#include #include -#include #include "consensus/yac/yac_types.hpp" #include "interfaces/common_objects/types.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - /** - * 
Class provide ordering on cluster for current round - */ - class ClusterOrdering { - public: - /** - * Creates cluster ordering from the vector of peers and peer positions - * @param order vector of peers - * @param peer_positions vector of indexes of peer positions - * @return ClusterOrdering if vectors are not empty, null otherwise - */ - static boost::optional create( - std::vector> const - &order, - std::vector const &peer_positions); - - /** - * Creates cluster ordering from the vector of peers - * @param order vector of peers - * @return ClusterOrdering if vectors are not empty, null otherwise - */ - static boost::optional create( - std::vector> const - &order); - - /** - * Provide current leader peer - */ - const shared_model::interface::Peer ¤tLeader(); - - /** - * Switch to next peer as leader - * @return this - */ - ClusterOrdering &switchToNext(); - - /** - * @return true if current leader not last peer in order - */ - bool hasNext() const; - - const shared_model::interface::types::PeerList &getPeers() const; - - PeersNumberType getNumberOfPeers() const; - - virtual ~ClusterOrdering() = default; - - ClusterOrdering() = delete; - - private: - // prohibit creation of the object not from create method - explicit ClusterOrdering( - std::vector> const - &order, - std::vector const &peer_positions); - - explicit ClusterOrdering( - std::vector> const - &order); - - std::vector> order_; - PeersNumberType index_ = 0; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + /** + * Class provide ordering on cluster for current round + */ + class ClusterOrdering { + public: + /** + * Creates cluster ordering from the vector of peers and peer positions + * @param order vector of peers + * @param peer_positions vector of indexes of peer positions + * @return ClusterOrdering if vectors are not empty, null otherwise + */ + static std::optional create( + std::vector> const + &order, + std::vector const 
&peer_positions); + + /** + * Creates cluster ordering from the vector of peers + * @param order vector of peers + * @return ClusterOrdering if vectors are not empty, null otherwise + */ + static std::optional create( + std::vector> const + &order); + + /** + * Provide current leader peer + */ + const shared_model::interface::Peer ¤tLeader(); + + /** + * Switch to next peer as leader + * @return this + */ + ClusterOrdering &switchToNext(); + + /** + * @return true if current leader not last peer in order + */ + bool hasNext() const; + + const shared_model::interface::types::PeerList &getPeers() const; + + PeersNumberType getNumberOfPeers() const; + + virtual ~ClusterOrdering() = default; + + ClusterOrdering() = delete; + + private: + // prohibit creation of the object not from create method + explicit ClusterOrdering( + std::vector> const + &order, + std::vector const &peer_positions); + + explicit ClusterOrdering( + std::vector> const + &order); + + std::vector> order_; + PeersNumberType index_ = 0; + }; +} // namespace iroha::consensus::yac + #endif // IROHA_CLUSTER_ORDER_HPP diff --git a/irohad/consensus/yac/consensus_outcome_type.hpp b/irohad/consensus/yac/consensus_outcome_type.hpp index c988b19d3de..257bf0232ad 100644 --- a/irohad/consensus/yac/consensus_outcome_type.hpp +++ b/irohad/consensus/yac/consensus_outcome_type.hpp @@ -6,17 +6,13 @@ #ifndef IROHA_CONSENSUS_OUTCOME_TYPE_HPP #define IROHA_CONSENSUS_OUTCOME_TYPE_HPP -namespace iroha { - namespace consensus { - namespace yac { - enum class ConsensusOutcomeType { - kCommit, /// commit for current round - kFuture, /// future round event - kNothing, /// peers voted for an empty hash - kReject, /// peers voted for different hashes - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + enum class ConsensusOutcomeType { + kCommit, /// commit for current round + kFuture, /// future round event + kNothing, /// peers voted for an empty hash + kReject, /// 
peers voted for different hashes + }; +} // namespace iroha::consensus::yac #endif // IROHA_CONSENSUS_CONSISTENCY_MODEL_HPP diff --git a/irohad/consensus/yac/consistency_model.hpp b/irohad/consensus/yac/consistency_model.hpp index 107820b77c9..6402c6f56b7 100644 --- a/irohad/consensus/yac/consistency_model.hpp +++ b/irohad/consensus/yac/consistency_model.hpp @@ -6,17 +6,11 @@ #ifndef IROHA_CONSENSUS_CONSISTENCY_MODEL_HPP #define IROHA_CONSENSUS_CONSISTENCY_MODEL_HPP -namespace iroha { - namespace consensus { - namespace yac { - - enum class ConsistencyModel { - kBft, ///< BFT consistency - kCft, ///< CFT consistency - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + enum class ConsistencyModel { + kBft, ///< BFT consistency + kCft, ///< CFT consistency + }; +} // namespace iroha::consensus::yac #endif // IROHA_CONSENSUS_CONSISTENCY_MODEL_HPP diff --git a/irohad/consensus/yac/impl/cluster_order.cpp b/irohad/consensus/yac/impl/cluster_order.cpp index 9874759420e..272e22eb354 100644 --- a/irohad/consensus/yac/impl/cluster_order.cpp +++ b/irohad/consensus/yac/impl/cluster_order.cpp @@ -5,74 +5,65 @@ #include "consensus/yac/cluster_order.hpp" -namespace iroha { - namespace consensus { - namespace yac { +#include - boost::optional ClusterOrdering::create( - const std::vector> - &order, - std::vector const &peer_positions) { - if (order.empty()) { - return boost::none; - } - return ClusterOrdering(order, peer_positions); - } +using iroha::consensus::yac::ClusterOrdering; - boost::optional ClusterOrdering::create( - const std::vector> - &order) { - if (order.empty()) { - return boost::none; - } - return ClusterOrdering(order); - } +std::optional ClusterOrdering::create( + const std::vector> &order, + std::vector const &peer_positions) { + if (order.empty()) { + return std::nullopt; + } + return ClusterOrdering(order, peer_positions); +} - ClusterOrdering::ClusterOrdering( - std::vector> const - &order, - 
std::vector const &peer_positions) { - order_.reserve(order.size()); - BOOST_ASSERT_MSG( - peer_positions.size() == order.size(), - "Peer positions must be the same size to define ordering."); +std::optional ClusterOrdering::create( + const std::vector> &order) { + if (order.empty()) { + return std::nullopt; + } + return ClusterOrdering(order); +} - for (auto const &i : peer_positions) { - order_.emplace_back(order[i]); - } - } +ClusterOrdering::ClusterOrdering( + std::vector> const &order, + std::vector const &peer_positions) { + order_.reserve(order.size()); + BOOST_ASSERT_MSG(peer_positions.size() == order.size(), + "Peer positions must be the same size to define ordering."); - ClusterOrdering::ClusterOrdering( - std::vector> const - &order) - : order_(order) {} + for (auto const &i : peer_positions) { + order_.emplace_back(order[i]); + } +} - // TODO : 24/03/2018 x3medima17: make it const, IR-1164 - const shared_model::interface::Peer &ClusterOrdering::currentLeader() { - if (index_ >= order_.size()) { - index_ = 0; - } - return *order_.at(index_); - } +ClusterOrdering::ClusterOrdering( + std::vector> const &order) + : order_(order) {} - bool ClusterOrdering::hasNext() const { - return index_ != order_.size(); - } +// TODO : 24/03/2018 x3medima17: make it const, IR-1164 +const shared_model::interface::Peer &ClusterOrdering::currentLeader() { + if (index_ >= order_.size()) { + index_ = 0; + } + return *order_.at(index_); +} - ClusterOrdering &ClusterOrdering::switchToNext() { - ++index_; - return *this; - } +bool ClusterOrdering::hasNext() const { + return index_ != order_.size(); +} - const shared_model::interface::types::PeerList & - ClusterOrdering::getPeers() const { - return order_; - } +ClusterOrdering &ClusterOrdering::switchToNext() { + ++index_; + return *this; +} - size_t ClusterOrdering::getNumberOfPeers() const { - return order_.size(); - } +const shared_model::interface::types::PeerList &ClusterOrdering::getPeers() + const { + return order_; +} - } 
// namespace yac - } // namespace consensus -} // namespace iroha +size_t ClusterOrdering::getNumberOfPeers() const { + return order_.size(); +} diff --git a/irohad/consensus/yac/impl/consensus_outcome_delay.cpp b/irohad/consensus/yac/impl/consensus_outcome_delay.cpp deleted file mode 100644 index 3c694eb48a9..00000000000 --- a/irohad/consensus/yac/impl/consensus_outcome_delay.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "consensus/yac/impl/consensus_outcome_delay.hpp" - -#include -#include - -using namespace iroha::consensus::yac; - -ConsensusOutcomeDelay::ConsensusOutcomeDelay( - std::chrono::milliseconds max_rounds_delay) - : max_rounds_delay_(max_rounds_delay), - delay_increment_( - std::min(max_rounds_delay_, std::chrono::milliseconds(1000))), - reject_delay_(0), - max_local_counter_(2), - local_counter_(0) {} - -std::chrono::milliseconds ConsensusOutcomeDelay::operator()( - ConsensusOutcomeType type) { - if (type == ConsensusOutcomeType::kReject - or type == ConsensusOutcomeType::kNothing) { - // Increment reject_counter each local_counter calls of function - ++local_counter_; - if (local_counter_ == max_local_counter_) { - local_counter_ = 0; - if (reject_delay_ < max_rounds_delay_) { - reject_delay_ += delay_increment_; - } - } - } else { - reject_delay_ = std::chrono::milliseconds(0); - } - return reject_delay_; -} diff --git a/irohad/consensus/yac/impl/consensus_outcome_delay.hpp b/irohad/consensus/yac/impl/consensus_outcome_delay.hpp deleted file mode 100644 index 6b2f3d28079..00000000000 --- a/irohad/consensus/yac/impl/consensus_outcome_delay.hpp +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_CONSENSUS_OUTCOME_DELAY_HPP -#define IROHA_CONSENSUS_OUTCOME_DELAY_HPP - -#include -#include - -#include "consensus/yac/consensus_outcome_type.hpp" - -namespace iroha::consensus::yac { - - class ConsensusOutcomeDelay { - public: - ConsensusOutcomeDelay(std::chrono::milliseconds max_rounds_delay); - - std::chrono::milliseconds operator()(ConsensusOutcomeType type); - - private: - std::chrono::milliseconds const max_rounds_delay_; - std::chrono::milliseconds const delay_increment_; - std::chrono::milliseconds reject_delay_; - uint64_t const max_local_counter_; - uint64_t local_counter_; - }; - -} // namespace iroha::consensus::yac - -#endif // IROHA_CONSENSUS_OUTCOME_DELAY_HPP diff --git a/irohad/consensus/yac/impl/peer_orderer_impl.cpp b/irohad/consensus/yac/impl/peer_orderer_impl.cpp index 1d956a5314b..a649db50292 100644 --- a/irohad/consensus/yac/impl/peer_orderer_impl.cpp +++ b/irohad/consensus/yac/impl/peer_orderer_impl.cpp @@ -13,24 +13,15 @@ #include "consensus/yac/yac_hash_provider.hpp" #include "interfaces/common_objects/peer.hpp" -namespace iroha { - namespace consensus { - namespace yac { - PeerOrdererImpl::PeerOrdererImpl( - std::shared_ptr peer_query_factory) - : peer_query_factory_(peer_query_factory) {} +using iroha::consensus::yac::PeerOrdererImpl; - boost::optional PeerOrdererImpl::getOrdering( - const YacHash &hash, - std::vector> const - &peers) { - auto prng = iroha::makeSeededPrng(hash.vote_hashes.block_hash.data(), - hash.vote_hashes.block_hash.size()); - iroha::generatePermutation( - peer_positions_, std::move(prng), peers.size()); +std::optional +PeerOrdererImpl::getOrdering( + const YacHash &hash, + std::vector> const &peers) { + auto prng = iroha::makeSeededPrng(hash.vote_hashes.block_hash.data(), + hash.vote_hashes.block_hash.size()); + iroha::generatePermutation(peer_positions_, std::move(prng), peers.size()); - return ClusterOrdering::create(peers, peer_positions_); - } - } // 
namespace yac - } // namespace consensus -} // namespace iroha + return ClusterOrdering::create(peers, peer_positions_); +} diff --git a/irohad/consensus/yac/impl/peer_orderer_impl.hpp b/irohad/consensus/yac/impl/peer_orderer_impl.hpp index 50991021d25..5393bab76e0 100644 --- a/irohad/consensus/yac/impl/peer_orderer_impl.hpp +++ b/irohad/consensus/yac/impl/peer_orderer_impl.hpp @@ -8,35 +8,22 @@ #include -#include "ametsuchi/peer_query_factory.hpp" #include "consensus/yac/yac_peer_orderer.hpp" -namespace iroha { - - namespace consensus { - namespace yac { - - class ClusterOrdering; - class YacHash; - - class PeerOrdererImpl : public YacPeerOrderer { - public: - // TODO 30.01.2019 lebdron: IR-262 Remove PeerQueryFactory - explicit PeerOrdererImpl( - std::shared_ptr peer_query_factory); - - boost::optional getOrdering( - const YacHash &hash, - std::vector> const - &peers) override; - - private: - std::vector peer_positions_; - std::shared_ptr peer_query_factory_; - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + class ClusterOrdering; + class YacHash; + + class PeerOrdererImpl : public YacPeerOrderer { + public: + std::optional getOrdering( + const YacHash &hash, + std::vector> const + &peers) override; + + private: + std::vector peer_positions_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_PEER_ORDERER_IMPL_HPP diff --git a/irohad/consensus/yac/impl/supermajority_checker_bft.cpp b/irohad/consensus/yac/impl/supermajority_checker_bft.cpp index 529daf4de61..0d061debd5c 100644 --- a/irohad/consensus/yac/impl/supermajority_checker_bft.cpp +++ b/irohad/consensus/yac/impl/supermajority_checker_bft.cpp @@ -9,32 +9,26 @@ #include #include "consensus/yac/impl/supermajority_checker_kf1.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - bool SupermajorityCheckerBft::hasSupermajority( - PeersNumberType agreed, PeersNumberType all) const { - return checkKfPlus1Supermajority( - agreed, 
all, detail::kSupermajorityCheckerKfPlus1Bft); - } - - bool SupermajorityCheckerBft::isTolerated(PeersNumberType number, - PeersNumberType all) const { - return checkKfPlus1Tolerance( - number, all, detail::kSupermajorityCheckerKfPlus1Bft); - } - - bool SupermajorityCheckerBft::canHaveSupermajority( - const VoteGroups &votes, PeersNumberType all) const { - const PeersNumberType largest_group = - boost::empty(votes) ? 0 : *boost::max_element(votes); - const PeersNumberType voted = boost::accumulate(votes, 0); - const PeersNumberType not_voted = all - voted; - - return hasSupermajority(largest_group + not_voted, all); - } - - } // namespace yac - } // namespace consensus -} // namespace iroha +using iroha::consensus::yac::SupermajorityCheckerBft; + +bool SupermajorityCheckerBft::hasSupermajority(PeersNumberType agreed, + PeersNumberType all) const { + return checkKfPlus1Supermajority( + agreed, all, detail::kSupermajorityCheckerKfPlus1Bft); +} + +bool SupermajorityCheckerBft::isTolerated(PeersNumberType number, + PeersNumberType all) const { + return checkKfPlus1Tolerance( + number, all, detail::kSupermajorityCheckerKfPlus1Bft); +} + +bool SupermajorityCheckerBft::canHaveSupermajority(const VoteGroups &votes, + PeersNumberType all) const { + const PeersNumberType largest_group = + boost::empty(votes) ? 
0 : *boost::max_element(votes); + const PeersNumberType voted = boost::accumulate(votes, 0); + const PeersNumberType not_voted = all - voted; + + return hasSupermajority(largest_group + not_voted, all); +} diff --git a/irohad/consensus/yac/impl/supermajority_checker_bft.hpp b/irohad/consensus/yac/impl/supermajority_checker_bft.hpp index 11ddc8c1b15..27099e1fc28 100644 --- a/irohad/consensus/yac/impl/supermajority_checker_bft.hpp +++ b/irohad/consensus/yac/impl/supermajority_checker_bft.hpp @@ -8,29 +8,24 @@ #include "consensus/yac/supermajority_checker.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - namespace detail { - /// The free parameter of Kf+1 consistency model for BFT. - constexpr unsigned int kSupermajorityCheckerKfPlus1Bft = 3; - } // namespace detail - - /// An implementation of BFT supermajority checker. - class SupermajorityCheckerBft : public SupermajorityChecker { - public: - bool hasSupermajority(PeersNumberType current, +namespace iroha::consensus::yac { + namespace detail { + /// The free parameter of Kf+1 consistency model for BFT. + constexpr unsigned int kSupermajorityCheckerKfPlus1Bft = 3; + } // namespace detail + + /// An implementation of BFT supermajority checker. 
+ class SupermajorityCheckerBft : public SupermajorityChecker { + public: + bool hasSupermajority(PeersNumberType current, + PeersNumberType all) const override; + + bool isTolerated(PeersNumberType number, + PeersNumberType all) const override; + + bool canHaveSupermajority(const VoteGroups &votes, PeersNumberType all) const override; - - bool isTolerated(PeersNumberType number, - PeersNumberType all) const override; - - bool canHaveSupermajority(const VoteGroups &votes, - PeersNumberType all) const override; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + }; +} // namespace iroha::consensus::yac #endif // IROHA_SUPERMAJORITY_CHECKER_BFT_HPP diff --git a/irohad/consensus/yac/impl/supermajority_checker_cft.cpp b/irohad/consensus/yac/impl/supermajority_checker_cft.cpp index 45e148542c6..c94b8069401 100644 --- a/irohad/consensus/yac/impl/supermajority_checker_cft.cpp +++ b/irohad/consensus/yac/impl/supermajority_checker_cft.cpp @@ -9,32 +9,26 @@ #include #include "consensus/yac/impl/supermajority_checker_kf1.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - bool SupermajorityCheckerCft::hasSupermajority( - PeersNumberType agreed, PeersNumberType all) const { - return checkKfPlus1Supermajority( - agreed, all, detail::kSupermajorityCheckerKfPlus1Cft); - } - - bool SupermajorityCheckerCft::isTolerated(PeersNumberType number, - PeersNumberType all) const { - return checkKfPlus1Tolerance( - number, all, detail::kSupermajorityCheckerKfPlus1Cft); - } - - bool SupermajorityCheckerCft::canHaveSupermajority( - const VoteGroups &votes, PeersNumberType all) const { - const PeersNumberType largest_group = - boost::empty(votes) ? 
0 : *boost::max_element(votes); - const PeersNumberType voted = boost::accumulate(votes, 0); - const PeersNumberType not_voted = all - voted; - - return hasSupermajority(largest_group + not_voted, all); - } - - } // namespace yac - } // namespace consensus -} // namespace iroha +using iroha::consensus::yac::SupermajorityCheckerCft; + +bool SupermajorityCheckerCft::hasSupermajority(PeersNumberType agreed, + PeersNumberType all) const { + return checkKfPlus1Supermajority( + agreed, all, detail::kSupermajorityCheckerKfPlus1Cft); +} + +bool SupermajorityCheckerCft::isTolerated(PeersNumberType number, + PeersNumberType all) const { + return checkKfPlus1Tolerance( + number, all, detail::kSupermajorityCheckerKfPlus1Cft); +} + +bool SupermajorityCheckerCft::canHaveSupermajority(const VoteGroups &votes, + PeersNumberType all) const { + const PeersNumberType largest_group = + boost::empty(votes) ? 0 : *boost::max_element(votes); + const PeersNumberType voted = boost::accumulate(votes, 0); + const PeersNumberType not_voted = all - voted; + + return hasSupermajority(largest_group + not_voted, all); +} diff --git a/irohad/consensus/yac/impl/supermajority_checker_cft.hpp b/irohad/consensus/yac/impl/supermajority_checker_cft.hpp index f82ca102d13..6010b1d8300 100644 --- a/irohad/consensus/yac/impl/supermajority_checker_cft.hpp +++ b/irohad/consensus/yac/impl/supermajority_checker_cft.hpp @@ -8,29 +8,24 @@ #include "consensus/yac/supermajority_checker.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - namespace detail { - /// The free parameter of Kf+1 consistency model for CFT. - constexpr unsigned int kSupermajorityCheckerKfPlus1Cft = 2; - } // namespace detail - - /// An implementation of CFT supermajority checker. - class SupermajorityCheckerCft : public SupermajorityChecker { - public: - bool hasSupermajority(PeersNumberType current, +namespace iroha::consensus::yac { + namespace detail { + /// The free parameter of Kf+1 consistency model for CFT. 
+ constexpr unsigned int kSupermajorityCheckerKfPlus1Cft = 2; + } // namespace detail + + /// An implementation of CFT supermajority checker. + class SupermajorityCheckerCft : public SupermajorityChecker { + public: + bool hasSupermajority(PeersNumberType current, + PeersNumberType all) const override; + + bool isTolerated(PeersNumberType number, + PeersNumberType all) const override; + + bool canHaveSupermajority(const VoteGroups &votes, PeersNumberType all) const override; - - bool isTolerated(PeersNumberType number, - PeersNumberType all) const override; - - bool canHaveSupermajority(const VoteGroups &votes, - PeersNumberType all) const override; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + }; +} // namespace iroha::consensus::yac #endif // IROHA_SUPERMAJORITY_CHECKER_CFT_HPP diff --git a/irohad/consensus/yac/impl/supermajority_checker_getter.cpp b/irohad/consensus/yac/impl/supermajority_checker_getter.cpp index d4c0d31c221..51c0004ae40 100644 --- a/irohad/consensus/yac/impl/supermajority_checker_getter.cpp +++ b/irohad/consensus/yac/impl/supermajority_checker_getter.cpp @@ -8,22 +8,16 @@ #include "consensus/yac/impl/supermajority_checker_bft.hpp" #include "consensus/yac/impl/supermajority_checker_cft.hpp" -namespace iroha { - namespace consensus { - namespace yac { +namespace yac = iroha::consensus::yac; - std::unique_ptr getSupermajorityChecker( - ConsistencyModel c) { - switch (c) { - case ConsistencyModel::kCft: - return std::make_unique(); - case ConsistencyModel::kBft: - return std::make_unique(); - default: - throw(std::runtime_error("Unknown consistency model requested!")); - } - } - - } // namespace yac - } // namespace consensus -} // namespace iroha +std::unique_ptr +yac::getSupermajorityChecker(ConsistencyModel c) { + switch (c) { + case ConsistencyModel::kCft: + return std::make_unique(); + case ConsistencyModel::kBft: + return std::make_unique(); + default: + throw(std::runtime_error("Unknown consistency model 
requested!")); + } +} diff --git a/irohad/consensus/yac/impl/supermajority_checker_kf1.hpp b/irohad/consensus/yac/impl/supermajority_checker_kf1.hpp index 17f27a6ea83..247fb91b4ff 100644 --- a/irohad/consensus/yac/impl/supermajority_checker_kf1.hpp +++ b/irohad/consensus/yac/impl/supermajority_checker_kf1.hpp @@ -8,57 +8,51 @@ #include "consensus/yac/supermajority_checker.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - /** - * A generic implementation of N = K * f + 1 model checkers. - * N is the amount of peers in the network, f is the number of tolerated - * faulty peers, and K is a free parameter. Supermajority is achieved when - * at least N - f peers agree. For the networks of arbitrary peers amount - * Na the tolerated number of faulty peers is (Na - 1) % K. - */ - - /** - * Check supermajority condition. - * - * @param number - the number of peers agreed on the state - * @param all - the total number of peers in the network - * @param k - the free parameter of the model - * - * @return whether supermajority is achieved by the agreed peers - */ - inline bool checkKfPlus1Supermajority(PeersNumberType number, - PeersNumberType all, - unsigned int k) { - if (number > all) { - return false; - } - return number * k >= (k - 1) * (all - 1) + k; - } - - /** - * Check tolerance condition. - * - * @param number - the number of possibly faulty peers - * @param all - the total number of peers in the network - * @param k - the free parameter of the model - * - * @return whether the given number of possibly faulty peers is tolerated - * by the network. - */ - inline bool checkKfPlus1Tolerance(PeersNumberType number, +namespace iroha::consensus::yac { + /** + * A generic implementation of N = K * f + 1 model checkers. + * N is the amount of peers in the network, f is the number of tolerated + * faulty peers, and K is a free parameter. Supermajority is achieved when + * at least N - f peers agree. 
For the networks of arbitrary peers amount + * Na the tolerated number of faulty peers is (Na - 1) % K. + */ + + /** + * Check supermajority condition. + * + * @param number - the number of peers agreed on the state + * @param all - the total number of peers in the network + * @param k - the free parameter of the model + * + * @return whether supermajority is achieved by the agreed peers + */ + inline bool checkKfPlus1Supermajority(PeersNumberType number, PeersNumberType all, unsigned int k) { - if (number > all) { - return false; - } - return number * k > all - 1; - } - - } // namespace yac - } // namespace consensus -} // namespace iroha + if (number > all) { + return false; + } + return number * k >= (k - 1) * (all - 1) + k; + } + + /** + * Check tolerance condition. + * + * @param number - the number of possibly faulty peers + * @param all - the total number of peers in the network + * @param k - the free parameter of the model + * + * @return whether the given number of possibly faulty peers is tolerated + * by the network. 
+ */ + inline bool checkKfPlus1Tolerance(PeersNumberType number, + PeersNumberType all, + unsigned int k) { + if (number > all) { + return false; + } + return number * k > all - 1; + } +} // namespace iroha::consensus::yac #endif // IROHA_SUPERMAJORITY_CHECKER_KF1_HPP diff --git a/irohad/consensus/yac/impl/timer_impl.cpp b/irohad/consensus/yac/impl/timer_impl.cpp index 74567c6c8ed..2914a4e853e 100644 --- a/irohad/consensus/yac/impl/timer_impl.cpp +++ b/irohad/consensus/yac/impl/timer_impl.cpp @@ -4,42 +4,16 @@ */ #include "consensus/yac/impl/timer_impl.hpp" -#include -namespace iroha { - namespace consensus { - namespace yac { - TimerImpl::TimerImpl(std::chrono::milliseconds delay_milliseconds, - rxcpp::observe_on_one_worker coordination) - : delay_milliseconds_(delay_milliseconds), - // use the same worker for all the invocations - coordination_(coordination.create_coordinator(coordinator_lifetime_) - .get_scheduler()) {} +#include "main/subscription.hpp" - void TimerImpl::invokeAfterDelay(std::function handler) { - deny(); - auto timer_lifetime = - rxcpp::observable<>::timer(delay_milliseconds_, coordination_) - .subscribe([handler{std::move(handler)}](auto) { handler(); }); - { - std::lock_guard lock(timer_lifetime_mutex); - timer_lifetime_ = timer_lifetime; - } - } +using iroha::consensus::yac::TimerImpl; - void TimerImpl::deny() { - rxcpp::composite_subscription timer_lifetime; - { - std::lock_guard lock(timer_lifetime_mutex); - timer_lifetime = timer_lifetime_; - } - timer_lifetime.unsubscribe(); - } +TimerImpl::TimerImpl(std::chrono::milliseconds delay_milliseconds) + : delay_milliseconds_(delay_milliseconds) {} - TimerImpl::~TimerImpl() { - deny(); - coordinator_lifetime_.unsubscribe(); - } - } // namespace yac - } // namespace consensus -} // namespace iroha +void TimerImpl::invokeAfterDelay(std::function handler) { + getSubscription()->dispatcher()->addDelayed(SubscriptionEngineHandlers::kYac, + delay_milliseconds_, + std::move(handler)); +} diff --git 
a/irohad/consensus/yac/impl/timer_impl.hpp b/irohad/consensus/yac/impl/timer_impl.hpp index 965627eec34..8e7fcc22520 100644 --- a/irohad/consensus/yac/impl/timer_impl.hpp +++ b/irohad/consensus/yac/impl/timer_impl.hpp @@ -6,42 +6,24 @@ #ifndef IROHA_TIMER_IMPL_HPP #define IROHA_TIMER_IMPL_HPP -#include - -#include #include "consensus/yac/timer.hpp" -#include - -namespace iroha { - namespace consensus { - namespace yac { - class TimerImpl : public Timer { - public: - /** - * Constructor - * @param delay_milliseconds delay before the next method invoke - * @param coordination factory for coordinators to run the timer on - */ - TimerImpl(std::chrono::milliseconds delay_milliseconds, - rxcpp::observe_on_one_worker coordination); - TimerImpl(const TimerImpl &) = delete; - TimerImpl &operator=(const TimerImpl &) = delete; +#include - void invokeAfterDelay(std::function handler) override; - void deny() override; +namespace iroha::consensus::yac { + class TimerImpl : public Timer { + public: + /** + * Constructor + * @param delay_milliseconds delay before the next method invoke + */ + TimerImpl(std::chrono::milliseconds delay_milliseconds); - ~TimerImpl() override; + void invokeAfterDelay(std::function handler) override; - private: - std::mutex timer_lifetime_mutex; - std::chrono::milliseconds delay_milliseconds_; - rxcpp::composite_subscription coordinator_lifetime_; - rxcpp::observe_on_one_worker coordination_; - rxcpp::composite_subscription timer_lifetime_; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + private: + std::chrono::milliseconds delay_milliseconds_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_TIMER_IMPL_HPP diff --git a/irohad/consensus/yac/impl/yac.cpp b/irohad/consensus/yac/impl/yac.cpp index c568b58e924..9c8965283a0 100644 --- a/irohad/consensus/yac/impl/yac.cpp +++ b/irohad/consensus/yac/impl/yac.cpp @@ -24,316 +24,303 @@ auto &getRound(const std::vector &state) { return state.at(0).hash.vote_round; } 
-namespace iroha { - namespace consensus { - namespace yac { - - std::shared_ptr Yac::create( - YacVoteStorage vote_storage, - std::shared_ptr network, - std::shared_ptr crypto, - std::shared_ptr timer, - ClusterOrdering order, - Round round, - rxcpp::observe_on_one_worker worker, - logger::LoggerPtr log) { - return std::make_shared(vote_storage, - network, - crypto, - timer, - order, - round, - worker, - std::move(log)); - } +using iroha::consensus::yac::Yac; + +std::shared_ptr Yac::create(YacVoteStorage vote_storage, + std::shared_ptr network, + std::shared_ptr crypto, + std::shared_ptr timer, + shared_model::interface::types::PeerList order, + Round round, + logger::LoggerPtr log) { + return std::make_shared( + vote_storage, network, crypto, timer, order, round, std::move(log)); +} - Yac::Yac(YacVoteStorage vote_storage, - std::shared_ptr network, - std::shared_ptr crypto, - std::shared_ptr timer, - ClusterOrdering order, - Round round, - rxcpp::observe_on_one_worker worker, - logger::LoggerPtr log) - : log_(std::move(log)), - cluster_order_(order), - round_(round), - worker_(worker), - notifier_(worker_, notifier_lifetime_), - vote_storage_(std::move(vote_storage)), - network_(std::move(network)), - crypto_(std::move(crypto)), - timer_(std::move(timer)) {} - - Yac::~Yac() { - notifier_lifetime_.unsubscribe(); - } +Yac::Yac(YacVoteStorage vote_storage, + std::shared_ptr network, + std::shared_ptr crypto, + std::shared_ptr timer, + shared_model::interface::types::PeerList order, + Round round, + logger::LoggerPtr log) + : log_(std::move(log)), + cluster_order_(order), + round_(round), + vote_storage_(std::move(vote_storage)), + network_(std::move(network)), + crypto_(std::move(crypto)), + timer_(std::move(timer)) {} + +void Yac::stop() { + network_->stop(); +} - void Yac::stop() { - network_->stop(); +std::optional Yac::processRoundSwitch( + consensus::Round const &round, + shared_model::interface::types::PeerList const &peers) { + round_ = round; + 
cluster_order_ = peers; + std::optional result; + auto it = future_states_.lower_bound(round_); + while (it != future_states_.end() + and it->first.block_round == round_.block_round) { + if (not it->second.empty()) { + if (auto maybe_answer = onState(std::vector( + std::make_move_iterator(it->second.begin()), + std::make_move_iterator(it->second.end())))) { + result = std::move(maybe_answer); } + } + ++it; + } + future_states_.erase(future_states_.begin(), it); + return result; +} - // ------|Hash gate|------ +// ------|Hash gate|------ - void Yac::vote(YacHash hash, - ClusterOrdering order, - boost::optional alternative_order) { - log_->info("Order for voting: [{}]", - boost::algorithm::join( - order.getPeers() - | boost::adaptors::transformed( - [](const auto &p) { return p->address(); }), - ", ")); - - std::unique_lock lock(mutex_); - cluster_order_ = order; - alternative_order_ = std::move(alternative_order); - round_ = hash.vote_round; - lock.unlock(); - auto vote = crypto_->getVote(hash); - // TODO 10.06.2018 andrei: IR-1407 move YAC propagation strategy to a - // separate entity - votingStep(vote); - } +void Yac::vote(YacHash hash, + ClusterOrdering order, + std::optional alternative_order) { + log_->info( + "Order for voting: [{}]", + boost::algorithm::join( + order.getPeers() | boost::adaptors::transformed([](const auto &p) { + return p->address(); + }), + ", ")); + + alternative_order_.reset(); + if (alternative_order) { + alternative_order_ = alternative_order->getPeers(); + } + assert(round_ == hash.vote_round); + auto vote = crypto_->getVote(hash); + // TODO 10.06.2018 andrei: IR-1407 move YAC propagation strategy to a + // separate entity + votingStep(vote, alternative_order ? 
*alternative_order : order); +} - rxcpp::observable Yac::onOutcome() { - return notifier_.get_observable(); - } +// ------|Network notifications|------ - // ------|Network notifications|------ +template +void removeMatching(std::vector &target, const P &predicate) { + target.erase(std::remove_if(target.begin(), target.end(), predicate), + target.end()); +} - template - void removeMatching(std::vector &target, const P &predicate) { - target.erase(std::remove_if(target.begin(), target.end(), predicate), - target.end()); - } +template +bool contains(const CollectionType &haystack, const ElementType &needle) { + return std::find(haystack.begin(), haystack.end(), needle) != haystack.end(); +} - template - bool contains(const CollectionType &haystack, const ElementType &needle) { - return std::find(haystack.begin(), haystack.end(), needle) - != haystack.end(); - } +/// moves the votes not present in known_keys from votes to return value +void Yac::removeUnknownPeersVotes( + std::vector &votes, + shared_model::interface::types::PeerList const &peers) { + auto known_keys = peers | boost::adaptors::transformed([](const auto &peer) { + return peer->pubkey(); + }); + removeMatching(votes, + [known_keys = std::move(known_keys), this](VoteMessage &vote) { + if (not contains(known_keys, vote.signature->publicKey())) { + log_->warn("Got a vote from an unknown peer: {}", vote); + return true; + } + return false; + }); +} - /// moves the votes not present in known_keys from votes to return value - void Yac::removeUnknownPeersVotes(std::vector &votes, - ClusterOrdering &order) { - auto known_keys = order.getPeers() - | boost::adaptors::transformed( - [](const auto &peer) { return peer->pubkey(); }); - removeMatching( - votes, - [known_keys = std::move(known_keys), this](VoteMessage &vote) { - if (not contains(known_keys, vote.signature->publicKey())) { - log_->warn("Got a vote from an unknown peer: {}", vote); - return true; - } - return false; - }); +std::optional Yac::onState( + 
std::vector state) { + removeUnknownPeersVotes(state, getCurrentOrder()); + if (state.empty()) { + log_->debug("No votes left in the message."); + return std::nullopt; + } + + if (crypto_->verify(state)) { + auto &proposal_round = getRound(state); + + if (proposal_round.block_round > round_.block_round) { + log_->info("Pass state from future for {} to pipeline", proposal_round); + future_states_[proposal_round].insert(state.begin(), state.end()); + return FutureMessage{std::move(state)}; + } + + if (proposal_round < round_) { + log_->info("Received state from past for {}, try to propagate back", + proposal_round); + tryPropagateBack(state); + return std::nullopt; + } + + if (alternative_order_) { + // filter votes with peers from cluster order to avoid the case when + // alternative peer is not present in cluster order + removeUnknownPeersVotes(state, cluster_order_); + if (state.empty()) { + log_->debug("No votes left in the message."); + return std::nullopt; } + } + + return applyState(state); + } + + log_->warn("Crypto verification failed for message. 
Votes: [{}]", + boost::algorithm::join( + state | boost::adaptors::transformed([](const auto &v) { + return v.signature->toString(); + }), + ", ")); + return std::nullopt; +} - void Yac::onState(std::vector state) { - std::unique_lock guard(mutex_); - - removeUnknownPeersVotes(state, getCurrentOrder()); - if (state.empty()) { - log_->debug("No votes left in the message."); - return; - } - - if (crypto_->verify(state)) { - auto &proposal_round = getRound(state); - - if (proposal_round.block_round > round_.block_round) { - guard.unlock(); - log_->info("Pass state from future for {} to pipeline", - proposal_round); - notifier_.get_subscriber().on_next(FutureMessage{std::move(state)}); - return; - } - - if (proposal_round.block_round < round_.block_round) { - log_->info("Received state from past for {}, try to propagate back", - proposal_round); - tryPropagateBack(state); - guard.unlock(); - return; - } - - if (alternative_order_) { - // filter votes with peers from cluster order to avoid the case when - // alternative peer is not present in cluster order - removeUnknownPeersVotes(state, cluster_order_); - if (state.empty()) { - log_->debug("No votes left in the message."); - return; - } - } - - applyState(state, guard); - } else { - log_->warn( - "Crypto verification failed for message. 
Votes: [{}]", - boost::algorithm::join( - state | boost::adaptors::transformed([](const auto &v) { - return v.signature->toString(); - }), - ", ")); - } - } +// ------|Private interface|------ - // ------|Private interface|------ +void Yac::votingStep(VoteMessage vote, + ClusterOrdering order, + uint32_t attempt) { + log_->info("votingStep got vote: {}, attempt {}", vote, attempt); - void Yac::votingStep(VoteMessage vote) { - std::unique_lock lock(mutex_); + auto committed = vote_storage_.isCommitted(vote.hash.vote_round); + if (committed) { + return; + } - auto committed = vote_storage_.isCommitted(vote.hash.vote_round); - if (committed) { - return; - } + enum { kRotatePeriod = 10 }; - auto &cluster_order = getCurrentOrder(); + if (0 != attempt && 0 == (attempt % kRotatePeriod)) { + vote_storage_.remove(vote.hash.vote_round); + } - const auto ¤t_leader = cluster_order.currentLeader(); + /** + * 3 attempts to build and commit block before we think that round is + * freezed + */ + if (attempt == kRotatePeriod) { + vote.hash.vote_hashes.proposal_hash.clear(); + vote.hash.vote_hashes.block_hash.clear(); + vote.hash.block_signature.reset(); + vote = crypto_->getVote(vote.hash); + } - log_->info("Vote {} to peer {}", vote, current_leader); + const auto ¤t_leader = order.currentLeader(); - propagateStateDirectly(current_leader, {vote}); - cluster_order.switchToNext(); - lock.unlock(); - timer_->invokeAfterDelay([this, vote] { this->votingStep(vote); }); - } + log_->info("Vote {} to peer {}", vote, current_leader); - void Yac::closeRound() { - timer_->deny(); - } + propagateStateDirectly(current_leader, {vote}); + order.switchToNext(); - ClusterOrdering &Yac::getCurrentOrder() { - return alternative_order_ ? 
*alternative_order_ : cluster_order_; - } + timer_->invokeAfterDelay([this, vote, order(std::move(order)), attempt] { + this->votingStep(vote, std::move(order), attempt + 1); + }); +} - boost::optional> - Yac::findPeer(const VoteMessage &vote) { - auto peers = cluster_order_.getPeers(); - auto it = - std::find_if(peers.begin(), peers.end(), [&](const auto &peer) { - return peer->pubkey() == vote.signature->publicKey(); - }); - return it != peers.end() ? boost::make_optional(std::move(*it)) - : boost::none; - } +shared_model::interface::types::PeerList &Yac::getCurrentOrder() { + return alternative_order_ ? *alternative_order_ : cluster_order_; +} - // ------|Apply data|------ - - void Yac::applyState(const std::vector &state, - std::unique_lock &lock) { - assert(lock.owns_lock()); - auto answer = - vote_storage_.store(state, cluster_order_.getNumberOfPeers()); - - // TODO 10.06.2018 andrei: IR-1407 move YAC propagation strategy to a - // separate entity - - iroha::match_in_place( - answer, - [&](const auto &answer) { - auto &proposal_round = getRound(state); - auto current_round = round_; - - /* - * It is possible that a new peer with an outdated peers list may - * collect an outcome from a smaller number of peers which are - * included in set of `f` peers in the system. The new peer will - * not accept our message with valid supermajority because he - * cannot apply votes from unknown peers. 
- */ - if (state.size() > 1 - or (proposal_round.block_round == current_round.block_round - and cluster_order_.getPeers().size() == 1)) { - // some peer has already collected commit/reject, so it is sent - if (vote_storage_.getProcessingState(proposal_round) - == ProposalState::kNotSentNotProcessed) { - vote_storage_.nextProcessingState(proposal_round); - log_->info( - "Received supermajority of votes for {}, skip " - "propagation", - proposal_round); - } - } - - auto processing_state = - vote_storage_.getProcessingState(proposal_round); - - auto votes = - [](const auto &state) -> const std::vector & { - return state.votes; - }; - - switch (processing_state) { - case ProposalState::kNotSentNotProcessed: - vote_storage_.nextProcessingState(proposal_round); - log_->info("Propagate state {} to whole network", - proposal_round); - this->propagateState(visit_in_place(answer, votes)); - break; - case ProposalState::kSentNotProcessed: - vote_storage_.nextProcessingState(proposal_round); - log_->info("Pass outcome for {} to pipeline", proposal_round); - lock.unlock(); - if (proposal_round >= current_round) { - this->closeRound(); - } - notifier_.get_subscriber().on_next(answer); - break; - case ProposalState::kSentProcessed: - if (current_round > proposal_round) - this->tryPropagateBack(state); - break; - } - }, - // sent a state which didn't match with current one - [&]() { this->tryPropagateBack(state); }); - if (lock.owns_lock()) { - lock.unlock(); - } - } +std::optional> Yac::findPeer( + const VoteMessage &vote) { + auto it = std::find_if( + cluster_order_.begin(), cluster_order_.end(), [&](const auto &peer) { + return peer->pubkey() == vote.signature->publicKey(); + }); + return it != cluster_order_.end() ? 
std::make_optional(*it) : std::nullopt; +} - void Yac::tryPropagateBack(const std::vector &state) { - // yac back propagation will work only if another peer is in - // propagation stage because if peer sends list of votes this means that - // state is already committed - if (state.size() != 1) { - return; - } - - vote_storage_.getLastFinalizedRound() | [&](const auto &last_round) { - if (getRound(state) <= last_round) { - vote_storage_.getState(last_round) | [&](const auto &last_state) { - this->findPeer(state.at(0)) | [&](const auto &from) { - log_->info("Propagate state {} directly to {}", - last_round, - from->address()); - auto votes = [](const auto &state) { return state.votes; }; - this->propagateStateDirectly(*from, - visit_in_place(last_state, votes)); - }; - }; - } - }; +// ------|Apply data|------ + +std::optional Yac::applyState( + const std::vector &state) { + auto answer = vote_storage_.store(state, cluster_order_.size()); + + // TODO 10.06.2018 andrei: IR-1407 move YAC propagation strategy to a + // separate entity + + if (answer) { + auto &proposal_round = getRound(state); + auto current_round = round_; + + /* + * It is possible that a new peer with an outdated peers list may + * collect an outcome from a smaller number of peers which are + * included in set of `f` peers in the system. The new peer will + * not accept our message with valid supermajority because he + * cannot apply votes from unknown peers. 
+ */ + if (state.size() > 1 or cluster_order_.size() == 1) { + // some peer has already collected commit/reject, so it is sent + if (vote_storage_.getProcessingState(proposal_round) + == ProposalState::kNotSentNotProcessed) { + vote_storage_.nextProcessingState(proposal_round); + log_->info( + "Received supermajority of votes for {}, skip " + "propagation", + proposal_round); } + } + + auto processing_state = vote_storage_.getProcessingState(proposal_round); + + auto votes = [](const auto &state) -> const std::vector & { + return state.votes; + }; + + switch (processing_state) { + case ProposalState::kNotSentNotProcessed: + vote_storage_.nextProcessingState(proposal_round); + log_->info("Propagate state {} to whole network", proposal_round); + propagateState(visit_in_place(*answer, votes)); + break; + case ProposalState::kSentNotProcessed: + vote_storage_.nextProcessingState(proposal_round); + log_->info("Pass outcome for {} to pipeline", proposal_round); + return *answer; + case ProposalState::kSentProcessed: + if (current_round > proposal_round) + tryPropagateBack(state); + break; + } + } + return std::nullopt; +} - // ------|Propagation|------ +void Yac::tryPropagateBack(const std::vector &state) { + // yac back propagation will work only if another peer is in + // propagation stage because if peer sends list of votes this means that + // state is already committed + if (state.size() != 1) { + return; + } + + vote_storage_.getLastFinalizedRound() | [&](const auto &last_round) { + if (getRound(state) <= last_round) { + vote_storage_.getState(last_round) | [&](const auto &last_state) { + this->findPeer(state.at(0)) | [&](const auto &from) { + log_->info( + "Propagate state {} directly to {}", last_round, from->address()); + auto votes = [](const auto &state) { return state.votes; }; + this->propagateStateDirectly(*from, + visit_in_place(last_state, votes)); + }; + }; + } + }; +} - void Yac::propagateState(const std::vector &msg) { - for (const auto &peer : 
cluster_order_.getPeers()) { - propagateStateDirectly(*peer, msg); - } - } +// ------|Propagation|------ - void Yac::propagateStateDirectly(const shared_model::interface::Peer &to, - const std::vector &msg) { - network_->sendState(to, msg); - } +void Yac::propagateState(const std::vector &msg) { + for (const auto &peer : cluster_order_) { + propagateStateDirectly(*peer, msg); + } +} - } // namespace yac - } // namespace consensus -} // namespace iroha +void Yac::propagateStateDirectly(const shared_model::interface::Peer &to, + const std::vector &msg) { + network_->sendState(to, msg); +} diff --git a/irohad/consensus/yac/impl/yac_crypto_provider_impl.cpp b/irohad/consensus/yac/impl/yac_crypto_provider_impl.cpp index b521be390ad..7fa6ebf13b0 100644 --- a/irohad/consensus/yac/impl/yac_crypto_provider_impl.cpp +++ b/irohad/consensus/yac/impl/yac_crypto_provider_impl.cpp @@ -13,56 +13,49 @@ #include "interfaces/common_objects/string_view_types.hpp" #include "logger/logger.hpp" -namespace iroha { - namespace consensus { - namespace yac { - CryptoProviderImpl::CryptoProviderImpl( - const shared_model::crypto::Keypair &keypair, logger::LoggerPtr log) - : keypair_(keypair), log_(std::move(log)) {} - - bool CryptoProviderImpl::verify(const std::vector &msg) { - return std::all_of( - std::begin(msg), std::end(msg), [this](const auto &vote) { - auto serialized = - PbConverters::serializeVote(vote).hash().SerializeAsString(); - auto blob = shared_model::crypto::Blob(serialized); - - using namespace shared_model::interface::types; - return shared_model::crypto::CryptoVerifier::verify( - SignedHexStringView{vote.signature->signedData()}, - blob, - PublicKeyHexStringView{vote.signature->publicKey()}) - .match([](const auto &) { return true; }, - [this](const auto &error) { - log_->debug("Vote signature verification failed: {}", - error.error); - return false; - }); - }); - } - - VoteMessage CryptoProviderImpl::getVote(YacHash hash) { - VoteMessage vote; - vote.hash = hash; - auto 
serialized = - PbConverters::serializeVotePayload(vote).hash().SerializeAsString(); - auto blob = shared_model::crypto::Blob(serialized); - const auto &pubkey = keypair_.publicKey(); - const auto &privkey = keypair_.privateKey(); - using namespace shared_model::interface::types; - auto signature = shared_model::crypto::CryptoSigner::sign( - blob, - shared_model::crypto::Keypair(PublicKeyHexStringView{pubkey}, - privkey)); - - // TODO 30.08.2018 andrei: IR-1670 Remove optional from YAC - // CryptoProviderImpl::getVote - vote.signature = std::make_shared( - SignedHexStringView{signature}, PublicKeyHexStringView{pubkey}); - - return vote; - } - - } // namespace yac - } // namespace consensus -} // namespace iroha +using iroha::consensus::yac::CryptoProviderImpl; + +CryptoProviderImpl::CryptoProviderImpl( + const shared_model::crypto::Keypair &keypair, logger::LoggerPtr log) + : keypair_(keypair), log_(std::move(log)) {} + +bool CryptoProviderImpl::verify(const std::vector &msg) { + return std::all_of(std::begin(msg), std::end(msg), [this](const auto &vote) { + auto serialized = + PbConverters::serializeVote(vote).hash().SerializeAsString(); + auto blob = shared_model::crypto::Blob(serialized); + + using namespace shared_model::interface::types; + return shared_model::crypto::CryptoVerifier::verify( + SignedHexStringView{vote.signature->signedData()}, + blob, + PublicKeyHexStringView{vote.signature->publicKey()}) + .match([](const auto &) { return true; }, + [this](const auto &error) { + log_->debug("Vote signature verification failed: {}", + error.error); + return false; + }); + }); +} + +iroha::consensus::yac::VoteMessage CryptoProviderImpl::getVote(YacHash hash) { + VoteMessage vote; + vote.hash = hash; + auto serialized = + PbConverters::serializeVotePayload(vote).hash().SerializeAsString(); + auto blob = shared_model::crypto::Blob(serialized); + const auto &pubkey = keypair_.publicKey(); + const auto &privkey = keypair_.privateKey(); + using namespace 
shared_model::interface::types; + auto signature = shared_model::crypto::CryptoSigner::sign( + blob, + shared_model::crypto::Keypair(PublicKeyHexStringView{pubkey}, privkey)); + + // TODO 30.08.2018 andrei: IR-1670 Remove optional from YAC + // CryptoProviderImpl::getVote + vote.signature = std::make_shared( + SignedHexStringView{signature}, PublicKeyHexStringView{pubkey}); + + return vote; +} diff --git a/irohad/consensus/yac/impl/yac_crypto_provider_impl.hpp b/irohad/consensus/yac/impl/yac_crypto_provider_impl.hpp index e58bdd231f2..17797c3926c 100644 --- a/irohad/consensus/yac/impl/yac_crypto_provider_impl.hpp +++ b/irohad/consensus/yac/impl/yac_crypto_provider_impl.hpp @@ -11,25 +11,21 @@ #include "cryptography/keypair.hpp" #include "logger/logger_fwd.hpp" -namespace iroha { - namespace consensus { - namespace yac { - class CryptoProviderImpl : public YacCryptoProvider { - public: - CryptoProviderImpl(const shared_model::crypto::Keypair &keypair, - logger::LoggerPtr log); - - // TODO 18.04.2020 IR-710 @mboldyrev: make it return Result - bool verify(const std::vector &msg) override; - - VoteMessage getVote(YacHash hash) override; - - private: - shared_model::crypto::Keypair keypair_; - logger::LoggerPtr log_; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + class CryptoProviderImpl : public YacCryptoProvider { + public: + CryptoProviderImpl(const shared_model::crypto::Keypair &keypair, + logger::LoggerPtr log); + + // TODO 18.04.2020 IR-710 @mboldyrev: make it return Result + bool verify(const std::vector &msg) override; + + VoteMessage getVote(YacHash hash) override; + + private: + shared_model::crypto::Keypair keypair_; + logger::LoggerPtr log_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_CRYPTO_PROVIDER_IMPL_HPP diff --git a/irohad/consensus/yac/impl/yac_gate_impl.cpp b/irohad/consensus/yac/impl/yac_gate_impl.cpp index 12a556aa0ac..46049899238 100644 --- 
a/irohad/consensus/yac/impl/yac_gate_impl.cpp +++ b/irohad/consensus/yac/impl/yac_gate_impl.cpp @@ -6,9 +6,6 @@ #include "consensus/yac/impl/yac_gate_impl.hpp" #include -#include -#include -#include #include "common/visitor.hpp" #include "consensus/yac/cluster_order.hpp" #include "consensus/yac/outcome_messages.hpp" @@ -31,240 +28,205 @@ namespace { } } // namespace -namespace iroha { - namespace consensus { - namespace yac { - - YacGateImpl::YacGateImpl( - std::shared_ptr hash_gate, - std::shared_ptr orderer, - boost::optional alternative_order, - std::shared_ptr hash_provider, - std::shared_ptr block_creator, - std::shared_ptr - consensus_result_cache, - logger::LoggerPtr log, - std::function - delay_func) - : log_(std::move(log)), - current_hash_(), - alternative_order_(std::move(alternative_order)), - published_events_([&] { - rxcpp::observable outcomes = hash_gate->onOutcome(); - rxcpp::observable delayed_outcomes = outcomes.concat_map( - [delay_func = std::move(delay_func)](auto message) { - auto delay = delay_func(visit_in_place( - message, - [](const CommitMessage &msg) { - auto const hash = getHash(msg.votes).value(); - if (hash.vote_hashes.proposal_hash.empty()) { - return ConsensusOutcomeType::kNothing; - } - return ConsensusOutcomeType::kCommit; - }, - [](const RejectMessage &msg) { - return ConsensusOutcomeType::kReject; - }, - [](const FutureMessage &msg) { - return ConsensusOutcomeType::kFuture; - })); - rxcpp::observable just_message = - rxcpp::observable<>::just(std::move(message)); - rxcpp::observable delayed_message = - just_message.delay(delay, - rxcpp::identity_current_thread()); - return delayed_message; - }, - rxcpp::identity_current_thread()); - rxcpp::observable events = - delayed_outcomes.flat_map([this](auto message) { - return visit_in_place(message, - [this](const CommitMessage &msg) { - return this->handleCommit(msg); - }, - [this](const RejectMessage &msg) { - return this->handleReject(msg); - }, - [this](const FutureMessage &msg) { - 
return this->handleFuture(msg); - }); - }); - rxcpp::connectable_observable published_events = - events.publish(); - rxcpp::observable published_ref_counted_events = - published_events.ref_count(); - return published_ref_counted_events; - }()), - orderer_(std::move(orderer)), - hash_provider_(std::move(hash_provider)), - block_creator_(std::move(block_creator)), - consensus_result_cache_(std::move(consensus_result_cache)), - hash_gate_(std::move(hash_gate)) { - block_creator_->onBlock().subscribe( - [this](const auto &event) { this->vote(event); }); - } - - void YacGateImpl::vote(const simulator::BlockCreatorEvent &event) { - if (current_hash_.vote_round >= event.round) { - log_->info( - "Current round {} is greater than or equal to vote round {}, " - "skipped", - current_hash_.vote_round, - event.round); - return; - } - - current_ledger_state_ = event.ledger_state; - current_hash_ = hash_provider_->makeHash(event); - assert(current_hash_.vote_round.block_round - == current_ledger_state_->top_block_info.height + 1); - - if (not event.round_data) { - current_block_ = boost::none; - // previous block is committed to block storage, it is safe to clear - // the cache - // TODO 2019-03-15 andrei: IR-405 Subscribe BlockLoaderService to - // BlockCreator::onBlock - consensus_result_cache_->release(); - log_->debug("Agreed on nothing to commit"); - } else { - current_block_ = event.round_data->block; - // insert the block we voted for to the consensus cache - consensus_result_cache_->insert(event.round_data->block); - log_->info("vote for (proposal: {}, block: {})", - current_hash_.vote_hashes.proposal_hash, - current_hash_.vote_hashes.block_hash); - } - - auto order = orderer_->getOrdering(current_hash_, - event.ledger_state->ledger_peers); - if (not order) { - log_->error("ordering doesn't provide peers => pass round"); - return; - } - - hash_gate_->vote(current_hash_, *order, std::move(alternative_order_)); - alternative_order_.reset(); - } - - rxcpp::observable 
YacGateImpl::onOutcome() { - return published_events_; - } - - void YacGateImpl::stop() { - hash_gate_->stop(); - } - - void YacGateImpl::copySignatures(const CommitMessage &commit) { - for (const auto &vote : commit.votes) { - auto sig = vote.hash.block_signature; - current_block_.value()->addSignature( - shared_model::interface::types::SignedHexStringView{ - sig->signedData()}, - shared_model::interface::types::PublicKeyHexStringView{ - sig->publicKey()}); - } - } +using iroha::consensus::yac::YacGateImpl; + +YacGateImpl::YacGateImpl( + std::shared_ptr hash_gate, + std::shared_ptr orderer, + std::optional alternative_order, + std::shared_ptr ledger_state, + std::shared_ptr hash_provider, + std::shared_ptr consensus_result_cache, + logger::LoggerPtr log) + : log_(std::move(log)), + current_hash_(), + alternative_order_(std::move(alternative_order)), + current_ledger_state_(std::move(ledger_state)), + orderer_(std::move(orderer)), + hash_provider_(std::move(hash_provider)), + consensus_result_cache_(std::move(consensus_result_cache)), + hash_gate_(std::move(hash_gate)) {} + +void YacGateImpl::vote(const simulator::BlockCreatorEvent &event) { + if (current_hash_.vote_round != event.round) { + log_->info("Current round {} not equal to vote round {}, skipped", + current_hash_.vote_round, + event.round); + return; + } - rxcpp::observable YacGateImpl::handleCommit( - const CommitMessage &msg) { - const auto hash = getHash(msg.votes).value(); - if (hash.vote_round < current_hash_.vote_round) { - log_->info( - "Current round {} is greater than commit round {}, skipped", - current_hash_.vote_round, - hash.vote_round); - return rxcpp::observable<>::empty(); - } + current_ledger_state_ = event.ledger_state; + current_hash_ = hash_provider_->makeHash(event); + assert(current_hash_.vote_round.block_round + == current_ledger_state_->top_block_info.height + 1); + + if (not event.round_data) { + current_block_ = std::nullopt; + // previous block is committed to block storage, it 
is safe to clear + // the cache + // TODO 2019-03-15 andrei: IR-405 Subscribe BlockLoaderService to + // BlockCreator::onBlock + consensus_result_cache_->release(); + log_->debug("Agreed on nothing to commit"); + } else { + current_block_ = event.round_data->block; + // insert the block we voted for to the consensus cache + consensus_result_cache_->insert(event.round_data->block); + log_->info("vote for (proposal: {}, block: {})", + current_hash_.vote_hashes.proposal_hash, + current_hash_.vote_hashes.block_hash); + } - assert(hash.vote_round.block_round - == current_hash_.vote_round.block_round); + auto order = + orderer_->getOrdering(current_hash_, event.ledger_state->ledger_peers); + if (not order) { + log_->error("ordering doesn't provide peers => pass round"); + return; + } - if (hash == current_hash_ and current_block_) { - // if node has voted for the committed block - // append signatures of other nodes - this->copySignatures(msg); - auto &block = current_block_.value(); - log_->info("consensus: commit top block: height {}, hash {}", - block->height(), - block->hash().hex()); - return rxcpp::observable<>::just(PairValid( - current_hash_.vote_round, current_ledger_state_, block)); - } + hash_gate_->vote(current_hash_, *order, std::move(alternative_order_)); + alternative_order_.reset(); +} + +std::optional YacGateImpl::processOutcome( + Answer const &outcome) { + return visit_in_place( + outcome, + [this](const CommitMessage &msg) { return this->handleCommit(msg); }, + [this](const RejectMessage &msg) { return this->handleReject(msg); }, + [this](const FutureMessage &msg) { return this->handleFuture(msg); }); +} + +void YacGateImpl::stop() { + hash_gate_->stop(); +} + +std::optional YacGateImpl::processRoundSwitch( + consensus::Round const &round, + std::shared_ptr ledger_state) { + current_hash_ = YacHash(); + current_hash_.vote_round = round; + current_ledger_state_ = std::move(ledger_state); + current_block_ = std::nullopt; + 
consensus_result_cache_->release(); + if (auto answer = hash_gate_->processRoundSwitch( + current_hash_.vote_round, current_ledger_state_->ledger_peers)) { + return processOutcome(*answer); + } + return std::nullopt; +} + +void YacGateImpl::copySignatures(const CommitMessage &commit) { + for (const auto &vote : commit.votes) { + auto sig = vote.hash.block_signature; + current_block_.value()->addSignature( + shared_model::interface::types::SignedHexStringView{sig->signedData()}, + shared_model::interface::types::PublicKeyHexStringView{ + sig->publicKey()}); + } +} + +std::optional YacGateImpl::handleCommit( + const CommitMessage &msg) { + const auto hash = getHash(msg.votes).value(); + if (hash.vote_round < current_hash_.vote_round) { + log_->info("Current round {} is greater than commit round {}, skipped", + current_hash_.vote_round, + hash.vote_round); + return std::nullopt; + } - auto public_keys = getPublicKeys(msg.votes); + assert(hash.vote_round.block_round == current_hash_.vote_round.block_round); + assert(hash.vote_round.block_round + == current_ledger_state_->top_block_info.height + 1); + + if (hash == current_hash_ and current_block_) { + // if node has voted for the committed block + // append signatures of other nodes + this->copySignatures(msg); + auto &block = current_block_.value(); + log_->info("consensus: commit top block: height {}, hash {}", + block->height(), + block->hash().hex()); + return PairValid(current_hash_.vote_round, current_ledger_state_, block); + } - if (hash.vote_hashes.proposal_hash.empty()) { - // if consensus agreed on nothing for commit - log_->info("Consensus skipped round, voted for nothing"); - current_block_ = boost::none; - return rxcpp::observable<>::just(AgreementOnNone( - hash.vote_round, current_ledger_state_, std::move(public_keys))); - } + auto public_keys = getPublicKeys(msg.votes); - log_->info("Voted for another block, waiting for sync"); - current_block_ = boost::none; - auto model_hash = 
hash_provider_->toModelHash(hash); - return rxcpp::observable<>::just( - VoteOther(hash.vote_round, - current_ledger_state_, - std::move(public_keys), - std::move(model_hash))); - } + if (hash.vote_hashes.proposal_hash.empty()) { + // if consensus agreed on nothing for commit + log_->info("Consensus skipped round, voted for nothing"); + current_block_ = std::nullopt; + return AgreementOnNone( + hash.vote_round, current_ledger_state_, std::move(public_keys)); + } - rxcpp::observable YacGateImpl::handleReject( - const RejectMessage &msg) { - const auto hash = getHash(msg.votes).value(); - auto public_keys = getPublicKeys(msg.votes); - if (hash.vote_round < current_hash_.vote_round) { - log_->info( - "Current round {} is greater than reject round {}, skipped", - current_hash_.vote_round, - hash.vote_round); - return rxcpp::observable<>::empty(); - } + log_->info("Voted for another block, waiting for sync"); + current_block_ = std::nullopt; + auto model_hash = hash_provider_->toModelHash(hash); + return VoteOther(hash.vote_round, + current_ledger_state_, + std::move(public_keys), + std::move(model_hash)); +} + +std::optional YacGateImpl::handleReject( + const RejectMessage &msg) { + const auto hash = getHash(msg.votes).value(); + auto public_keys = getPublicKeys(msg.votes); + if (hash.vote_round < current_hash_.vote_round) { + log_->info("Current round {} is greater than reject round {}, skipped", + current_hash_.vote_round, + hash.vote_round); + return std::nullopt; + } - assert(hash.vote_round.block_round - == current_hash_.vote_round.block_round); + assert(hash.vote_round.block_round == current_hash_.vote_round.block_round); + assert(hash.vote_round.block_round + == current_ledger_state_->top_block_info.height + 1); - auto has_same_proposals = - std::all_of(std::next(msg.votes.begin()), - msg.votes.end(), - [first = msg.votes.begin()](const auto ¤t) { - return first->hash.vote_hashes.proposal_hash - == current.hash.vote_hashes.proposal_hash; - }); - if (not 
has_same_proposals) { - log_->info("Proposal reject since all hashes are different"); - return rxcpp::observable<>::just(ProposalReject( - hash.vote_round, current_ledger_state_, std::move(public_keys))); - } - log_->info("Block reject since proposal hashes match"); - return rxcpp::observable<>::just(BlockReject( - hash.vote_round, current_ledger_state_, std::move(public_keys))); - } + auto has_same_proposals = + std::all_of(std::next(msg.votes.begin()), + msg.votes.end(), + [first = msg.votes.begin()](const auto ¤t) { + return first->hash.vote_hashes.proposal_hash + == current.hash.vote_hashes.proposal_hash; + }); + if (not has_same_proposals) { + log_->info("Proposal reject since all hashes are different"); + return ProposalReject( + hash.vote_round, current_ledger_state_, std::move(public_keys)); + } + log_->info("Block reject since proposal hashes match"); + return BlockReject( + hash.vote_round, current_ledger_state_, std::move(public_keys)); +} + +std::optional YacGateImpl::handleFuture( + const FutureMessage &msg) { + const auto hash = getHash(msg.votes).value(); + auto public_keys = getPublicKeys(msg.votes); + if (hash.vote_round.block_round <= current_hash_.vote_round.block_round) { + log_->info( + "Current block round {} is not lower than future block round {}, " + "skipped", + current_hash_.vote_round.block_round, + hash.vote_round.block_round); + return std::nullopt; + } - rxcpp::observable YacGateImpl::handleFuture( - const FutureMessage &msg) { - const auto hash = getHash(msg.votes).value(); - auto public_keys = getPublicKeys(msg.votes); - if (hash.vote_round.block_round - <= current_hash_.vote_round.block_round) { - log_->info( - "Current block round {} is not lower than future block round {}, " - "skipped", - current_hash_.vote_round.block_round, - hash.vote_round.block_round); - return rxcpp::observable<>::empty(); - } + if (current_ledger_state_->top_block_info.height + 1 + >= hash.vote_round.block_round) { + log_->info( + "Difference between top 
height {} and future block round {} is " + "less than 2, skipped", + current_ledger_state_->top_block_info.height, + hash.vote_round.block_round); + return std::nullopt; + } - assert(hash.vote_round.block_round - > current_hash_.vote_round.block_round); + assert(hash.vote_round.block_round > current_hash_.vote_round.block_round); - log_->info("Message from future, waiting for sync"); - return rxcpp::observable<>::just(Future( - hash.vote_round, current_ledger_state_, std::move(public_keys))); - } - } // namespace yac - } // namespace consensus -} // namespace iroha + log_->info("Message from future, waiting for sync"); + return Future(hash.vote_round, current_ledger_state_, std::move(public_keys)); +} diff --git a/irohad/consensus/yac/impl/yac_gate_impl.hpp b/irohad/consensus/yac/impl/yac_gate_impl.hpp index 3f6e7635e82..47935d5c640 100644 --- a/irohad/consensus/yac/impl/yac_gate_impl.hpp +++ b/irohad/consensus/yac/impl/yac_gate_impl.hpp @@ -9,80 +9,72 @@ #include "consensus/yac/yac_gate.hpp" #include +#include -#include #include "consensus/consensus_block_cache.hpp" +#include "consensus/gate_object.hpp" #include "consensus/yac/consensus_outcome_type.hpp" -#include "consensus/yac/impl/consensus_outcome_delay.hpp" +#include "consensus/yac/storage/storage_result.hpp" #include "consensus/yac/yac_hash_provider.hpp" #include "logger/logger_fwd.hpp" namespace iroha { - namespace simulator { class BlockCreator; } - namespace network { class BlockLoader; } - - namespace consensus { - namespace yac { - - struct CommitMessage; - class YacPeerOrderer; - - class YacGateImpl : public YacGate { - public: - YacGateImpl( - std::shared_ptr hash_gate, - std::shared_ptr orderer, - boost::optional alternative_order, - std::shared_ptr hash_provider, - std::shared_ptr block_creator, - std::shared_ptr - consensus_result_cache, - logger::LoggerPtr log, - std::function - delay_func = - ConsensusOutcomeDelay(std::chrono::milliseconds(0))); - void vote(const simulator::BlockCreatorEvent 
&event) override; - - rxcpp::observable onOutcome() override; - - void stop() override; - - private: - /** - * Update current block with signatures from commit message - * @param commit - commit message to get signatures from - */ - void copySignatures(const CommitMessage &commit); - - rxcpp::observable handleCommit(const CommitMessage &msg); - rxcpp::observable handleReject(const RejectMessage &msg); - rxcpp::observable handleFuture(const FutureMessage &msg); - - logger::LoggerPtr log_; - - boost::optional> - current_block_; - YacHash current_hash_; - boost::optional alternative_order_; - std::shared_ptr current_ledger_state_; - - rxcpp::observable published_events_; - std::shared_ptr orderer_; - std::shared_ptr hash_provider_; - std::shared_ptr block_creator_; - std::shared_ptr - consensus_result_cache_; - std::shared_ptr hash_gate_; - }; - - } // namespace yac - } // namespace consensus } // namespace iroha +namespace iroha::consensus::yac { + struct CommitMessage; + class YacPeerOrderer; + + class YacGateImpl : public YacGate { + public: + YacGateImpl( + std::shared_ptr hash_gate, + std::shared_ptr orderer, + std::optional alternative_order, + std::shared_ptr ledger_state, + std::shared_ptr hash_provider, + std::shared_ptr consensus_result_cache, + logger::LoggerPtr log); + void vote(const simulator::BlockCreatorEvent &event) override; + + std::optional processOutcome(Answer const &outcome); + + void stop() override; + + std::optional processRoundSwitch( + consensus::Round const &round, + std::shared_ptr ledger_state); + + private: + /** + * Update current block with signatures from commit message + * @param commit - commit message to get signatures from + */ + void copySignatures(const CommitMessage &commit); + + std::optional handleCommit(const CommitMessage &msg); + std::optional handleReject(const RejectMessage &msg); + std::optional handleFuture(const FutureMessage &msg); + + logger::LoggerPtr log_; + + std::optional> + current_block_; + YacHash 
current_hash_; + std::optional alternative_order_; + std::shared_ptr current_ledger_state_; + + std::shared_ptr orderer_; + std::shared_ptr hash_provider_; + std::shared_ptr consensus_result_cache_; + std::shared_ptr hash_gate_; + }; +} // namespace iroha::consensus::yac + #endif // IROHA_YAC_GATE_IMPL_HPP diff --git a/irohad/consensus/yac/impl/yac_hash_provider_impl.cpp b/irohad/consensus/yac/impl/yac_hash_provider_impl.cpp index 29d5eda0409..a81d67a3811 100644 --- a/irohad/consensus/yac/impl/yac_hash_provider_impl.cpp +++ b/irohad/consensus/yac/impl/yac_hash_provider_impl.cpp @@ -7,33 +7,26 @@ #include "interfaces/iroha_internal/block.hpp" #include "interfaces/iroha_internal/proposal.hpp" -namespace iroha { - namespace consensus { - namespace yac { +using iroha::consensus::yac::YacHashProviderImpl; - YacHash YacHashProviderImpl::makeHash( - const simulator::BlockCreatorEvent &event) const { - YacHash result; - if (event.round_data) { - result.vote_hashes.proposal_hash = - event.round_data->proposal->hash().hex(); - result.vote_hashes.block_hash = event.round_data->block->hash().hex(); - result.block_signature = - clone(event.round_data->block->signatures().front()); - } - result.vote_round = event.round; +iroha::consensus::yac::YacHash YacHashProviderImpl::makeHash( + const simulator::BlockCreatorEvent &event) const { + YacHash result; + if (event.round_data) { + result.vote_hashes.proposal_hash = event.round_data->proposal->hash().hex(); + result.vote_hashes.block_hash = event.round_data->block->hash().hex(); + result.block_signature = + clone(event.round_data->block->signatures().front()); + } + result.vote_round = event.round; - return result; - } + return result; +} - shared_model::interface::types::HashType YacHashProviderImpl::toModelHash( - const YacHash &hash) const { - auto blob = shared_model::crypto::Blob::fromHexString( - hash.vote_hashes.block_hash); - auto string_blob = shared_model::crypto::toBinaryString(blob); - return 
shared_model::interface::types::HashType(string_blob); - } - - } // namespace yac - } // namespace consensus -} // namespace iroha +shared_model::interface::types::HashType YacHashProviderImpl::toModelHash( + const YacHash &hash) const { + auto blob = + shared_model::crypto::Blob::fromHexString(hash.vote_hashes.block_hash); + auto string_blob = shared_model::crypto::toBinaryString(blob); + return shared_model::interface::types::HashType(string_blob); +} diff --git a/irohad/consensus/yac/impl/yac_hash_provider_impl.hpp b/irohad/consensus/yac/impl/yac_hash_provider_impl.hpp index 74ba3a1faf4..245bf416fe2 100644 --- a/irohad/consensus/yac/impl/yac_hash_provider_impl.hpp +++ b/irohad/consensus/yac/impl/yac_hash_provider_impl.hpp @@ -8,19 +8,14 @@ #include "consensus/yac/yac_hash_provider.hpp" -namespace iroha { - namespace consensus { - namespace yac { - class YacHashProviderImpl : public YacHashProvider { - public: - YacHash makeHash( - const simulator::BlockCreatorEvent &event) const override; +namespace iroha::consensus::yac { + class YacHashProviderImpl : public YacHashProvider { + public: + YacHash makeHash(const simulator::BlockCreatorEvent &event) const override; - shared_model::interface::types::HashType toModelHash( - const YacHash &hash) const override; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + shared_model::interface::types::HashType toModelHash( + const YacHash &hash) const override; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_HASH_PROVIDER_IMPL_HPP diff --git a/irohad/consensus/yac/outcome_messages.hpp b/irohad/consensus/yac/outcome_messages.hpp index 350b7333b56..2c34ad61895 100644 --- a/irohad/consensus/yac/outcome_messages.hpp +++ b/irohad/consensus/yac/outcome_messages.hpp @@ -11,73 +11,68 @@ #include "consensus/yac/vote_message.hpp" #include "utils/string_builder.hpp" -namespace iroha { - namespace consensus { - namespace yac { +namespace iroha::consensus::yac { + template + struct OutcomeMessage { 
+ explicit OutcomeMessage(std::vector votes) + : votes(std::move(votes)) {} - template - struct OutcomeMessage { - explicit OutcomeMessage(std::vector votes) - : votes(std::move(votes)) {} + OutcomeMessage(std::initializer_list votes) : votes(votes) {} - OutcomeMessage(std::initializer_list votes) - : votes(votes) {} + std::vector votes; - std::vector votes; + bool operator==(const OutcomeMessage &rhs) const { + return votes == rhs.votes; + } - bool operator==(const OutcomeMessage &rhs) const { - return votes == rhs.votes; - } + std::string toString() const { + return shared_model::detail::PrettyStringBuilder() + .init(typeName()) + .appendNamed("votes", votes) + .finalize(); + } - std::string toString() const { - return shared_model::detail::PrettyStringBuilder() - .init(typeName()) - .appendNamed("votes", votes) - .finalize(); - } + virtual const std::string &typeName() const = 0; - virtual const std::string &typeName() const = 0; + protected: + ~OutcomeMessage() = default; + }; - protected: - ~OutcomeMessage() = default; - }; + /** + * CommitMsg means consensus on cluster achieved. + * All nodes deals on some solution + */ + struct CommitMessage final : OutcomeMessage { + using OutcomeMessage::OutcomeMessage; + const std::string &typeName() const override { + const static std::string name{"CommitMessage"}; + return name; + } + }; - /** - * CommitMsg means consensus on cluster achieved. 
- * All nodes deals on some solution - */ - struct CommitMessage final : OutcomeMessage { - using OutcomeMessage::OutcomeMessage; - const std::string &typeName() const override { - const static std::string name{"CommitMessage"}; - return name; - } - }; + /** + * Reject means that there is impossible + * to collect supermajority for any block + */ + struct RejectMessage final : OutcomeMessage { + using OutcomeMessage::OutcomeMessage; + const std::string &typeName() const override { + const static std::string name{"RejectMessage"}; + return name; + } + }; - /** - * Reject means that there is impossible - * to collect supermajority for any block - */ - struct RejectMessage final : OutcomeMessage { - using OutcomeMessage::OutcomeMessage; - const std::string &typeName() const override { - const static std::string name{"RejectMessage"}; - return name; - } - }; + /** + * Represents the case when the round number is greater than the current, + * and the quorum is unknown + */ + struct FutureMessage final : OutcomeMessage { + using OutcomeMessage::OutcomeMessage; + const std::string &typeName() const override { + const static std::string name{"FutureMessage"}; + return name; + } + }; +} // namespace iroha::consensus::yac - /** - * Represents the case when the round number is greater than the current, - * and the quorum is unknown - */ - struct FutureMessage final : OutcomeMessage { - using OutcomeMessage::OutcomeMessage; - const std::string &typeName() const override { - const static std::string name{"FutureMessage"}; - return name; - } - }; - } // namespace yac - } // namespace consensus -} // namespace iroha #endif // IROHA_MESSAGES_HPP diff --git a/irohad/consensus/yac/storage/buffered_cleanup_strategy.hpp b/irohad/consensus/yac/storage/buffered_cleanup_strategy.hpp index e83bf928850..a964896483e 100644 --- a/irohad/consensus/yac/storage/buffered_cleanup_strategy.hpp +++ b/irohad/consensus/yac/storage/buffered_cleanup_strategy.hpp @@ -13,65 +13,61 @@ #include 
"consensus/yac/outcome_messages.hpp" -namespace iroha { - namespace consensus { - namespace yac { - class BufferedCleanupStrategy : public CleanupStrategy { - public: - using RoundType = Round; +namespace iroha::consensus::yac { + class BufferedCleanupStrategy : public CleanupStrategy { + public: + using RoundType = Round; - /** - * The method finalizes passed round. On Commit message it purges last - * reject round if commit is greater. - * @param consensus_round - finalized round of the consensus - * @param answer - the output of the round - * @return rounds to be removed, if any. - */ - boost::optional finalize( - RoundType consensus_round, Answer answer) override; + /** + * The method finalizes passed round. On Commit message it purges last + * reject round if commit is greater. + * @param consensus_round - finalized round of the consensus + * @param answer - the output of the round + * @return rounds to be removed, if any. + */ + boost::optional finalize( + RoundType consensus_round, Answer answer) override; - bool shouldCreateRound(const RoundType &round) override; + bool shouldCreateRound(const RoundType &round) override; - private: - /** - * Remove all rounds before last committed - * @return rounds to be removed. Also, the same rounds are removed from - * created_rounds_ collection - */ - RoundsType truncateCreatedRounds(); + private: + /** + * Remove all rounds before last committed + * @return rounds to be removed. 
Also, the same rounds are removed from + * created_rounds_ collection + */ + RoundsType truncateCreatedRounds(); - /** - * @return the lowest round from last committed and last rejected - * rounds, if the operation can't be applied - returns none - */ - boost::optional minimalRound() const; + /** + * @return the lowest round from last committed and last rejected + * rounds, if the operation can't be applied - returns none + */ + boost::optional minimalRound() const; - /** - * The method creates round into created_rounds_ collection - * @param round - round for insertion - */ - void createRound(const Round &round); + /** + * The method creates round into created_rounds_ collection + * @param round - round for insertion + */ + void createRound(const Round &round); - /** - * Checks whether we should add round into created_rounds_ collection - * @param round - round for checking - * @return true if could be inserted - */ - bool isRequiredCreation(const Round &round) const; + /** + * Checks whether we should add round into created_rounds_ collection + * @param round - round for checking + * @return true if could be inserted + */ + bool isRequiredCreation(const Round &round) const; - /// all stored rounds - std::priority_queue, - std::greater> - created_rounds_; + /// all stored rounds + std::priority_queue, + std::greater> + created_rounds_; - /// maximal reject round, could empty if commit happened - boost::optional last_reject_round_; - /// maximal commit round - boost::optional last_commit_round_; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + /// maximal reject round, could empty if commit happened + boost::optional last_reject_round_; + /// maximal commit round + boost::optional last_commit_round_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_BUFFERED_CLEANUP_STRATEGY_HPP diff --git a/irohad/consensus/yac/storage/cleanup_strategy.hpp b/irohad/consensus/yac/storage/cleanup_strategy.hpp index b25a93144ea..6733ee6beea 100644 
--- a/irohad/consensus/yac/storage/cleanup_strategy.hpp +++ b/irohad/consensus/yac/storage/cleanup_strategy.hpp @@ -13,36 +13,32 @@ #include "consensus/round.hpp" #include "consensus/yac/storage/storage_result.hpp" -namespace iroha { - namespace consensus { - namespace yac { - class CleanupStrategy { - public: - /** - * Collection of rounds type - */ - using RoundsType = std::vector; - - /** - * Notify strategy about new rounds - * @param round - new round - * @param answer - outcome of round - * @return a collection of rounds for removing from the state - */ - virtual boost::optional finalize(Round round, - Answer answer) = 0; - - /** - * The method checks whether we should add a new round - * @param round - round for creation - * @return true if round should be created - */ - virtual bool shouldCreateRound(const Round &round) = 0; - - virtual ~CleanupStrategy() = default; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + class CleanupStrategy { + public: + /** + * Collection of rounds type + */ + using RoundsType = std::vector; + + /** + * Notify strategy about new rounds + * @param round - new round + * @param answer - outcome of round + * @return a collection of rounds for removing from the state + */ + virtual boost::optional finalize(Round round, + Answer answer) = 0; + + /** + * The method checks whether we should add a new round + * @param round - round for creation + * @return true if round should be created + */ + virtual bool shouldCreateRound(const Round &round) = 0; + + virtual ~CleanupStrategy() = default; + }; +} // namespace iroha::consensus::yac #endif // IROHA_CLEANUP_STRATEGY_HPP diff --git a/irohad/consensus/yac/storage/impl/buffered_cleanup_strategy.cpp b/irohad/consensus/yac/storage/impl/buffered_cleanup_strategy.cpp index a39634bb5a7..d4cf5869d40 100644 --- a/irohad/consensus/yac/storage/impl/buffered_cleanup_strategy.cpp +++ 
b/irohad/consensus/yac/storage/impl/buffered_cleanup_strategy.cpp @@ -7,11 +7,10 @@ #include "common/visitor.hpp" -using namespace iroha::consensus; -using namespace iroha::consensus::yac; +using iroha::consensus::yac::BufferedCleanupStrategy; -boost::optional BufferedCleanupStrategy::finalize( - RoundType consensus_round, Answer answer) { +boost::optional +BufferedCleanupStrategy::finalize(RoundType consensus_round, Answer answer) { using OptRefRoundType = boost::optional &; auto &target_round = iroha::visit_in_place( answer, @@ -38,7 +37,8 @@ boost::optional BufferedCleanupStrategy::finalize( } } -CleanupStrategy::RoundsType BufferedCleanupStrategy::truncateCreatedRounds() { +iroha::consensus::yac::CleanupStrategy::RoundsType +BufferedCleanupStrategy::truncateCreatedRounds() { CleanupStrategy::RoundsType removed; if (last_commit_round_) { while (*last_commit_round_ > created_rounds_.top()) { diff --git a/irohad/consensus/yac/storage/impl/yac_block_storage.cpp b/irohad/consensus/yac/storage/impl/yac_block_storage.cpp index 28d60f826d7..1cb279f6050 100644 --- a/irohad/consensus/yac/storage/impl/yac_block_storage.cpp +++ b/irohad/consensus/yac/storage/impl/yac_block_storage.cpp @@ -7,84 +7,79 @@ #include "logger/logger.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - // --------| Public API |-------- - - YacBlockStorage::YacBlockStorage( - YacHash hash, - PeersNumberType peers_in_round, - std::shared_ptr supermajority_checker, - logger::LoggerPtr log) - : storage_key_(std::move(hash)), - peers_in_round_(peers_in_round), - supermajority_checker_(std::move(supermajority_checker)), - log_(std::move(log)) {} - - boost::optional YacBlockStorage::insert(VoteMessage msg) { - if (validScheme(msg) and uniqueVote(msg)) { - votes_.push_back(msg); - - log_->info( - "Vote with round {} and hashes ({}, {}) inserted, votes in " - "storage [{}/{}]", - msg.hash.vote_round, - msg.hash.vote_hashes.proposal_hash, - msg.hash.vote_hashes.block_hash, - votes_.size(), 
- peers_in_round_); - } - return getState(); - } - - boost::optional YacBlockStorage::insert( - std::vector votes) { - std::for_each(votes.begin(), votes.end(), [this](auto vote) { - this->insert(vote); - }); - return getState(); - } - - std::vector YacBlockStorage::getVotes() const { - return votes_; - } - - size_t YacBlockStorage::getNumberOfVotes() const { - return votes_.size(); - } - - boost::optional YacBlockStorage::getState() { - auto supermajority = supermajority_checker_->hasSupermajority( - votes_.size(), peers_in_round_); - if (supermajority) { - return Answer(CommitMessage(votes_)); - } - return boost::none; - } - - bool YacBlockStorage::isContains(const VoteMessage &msg) const { - return std::count(votes_.begin(), votes_.end(), msg) != 0; - } - - YacHash YacBlockStorage::getStorageKey() const { - return storage_key_; - } - - // --------| private api |-------- - - bool YacBlockStorage::uniqueVote(VoteMessage &msg) { - // lookup take O(n) times - return std::all_of(votes_.begin(), votes_.end(), [&msg](auto vote) { - return vote != msg; - }); - } - - bool YacBlockStorage::validScheme(VoteMessage &vote) { - return getStorageKey() == vote.hash; - } - - } // namespace yac - } // namespace consensus -} // namespace iroha +using iroha::consensus::yac::YacBlockStorage; + +// --------| Public API |-------- + +YacBlockStorage::YacBlockStorage( + YacHash hash, + PeersNumberType peers_in_round, + std::shared_ptr supermajority_checker, + logger::LoggerPtr log) + : storage_key_(std::move(hash)), + peers_in_round_(peers_in_round), + supermajority_checker_(std::move(supermajority_checker)), + log_(std::move(log)) {} + +boost::optional YacBlockStorage::insert( + VoteMessage msg) { + if (validScheme(msg) and uniqueVote(msg)) { + votes_.push_back(msg); + + log_->info( + "Vote with round {} and hashes ({}, {}) inserted, votes in " + "storage [{}/{}]", + msg.hash.vote_round, + msg.hash.vote_hashes.proposal_hash, + msg.hash.vote_hashes.block_hash, + votes_.size(), + 
peers_in_round_); + } + return getState(); +} + +boost::optional YacBlockStorage::insert( + std::vector votes) { + for (auto &vote : votes) { + insert(std::move(vote)); + } + return getState(); +} + +std::vector YacBlockStorage::getVotes() + const { + return votes_; +} + +size_t YacBlockStorage::getNumberOfVotes() const { + return votes_.size(); +} + +boost::optional YacBlockStorage::getState() { + auto supermajority = + supermajority_checker_->hasSupermajority(votes_.size(), peers_in_round_); + if (supermajority) { + return Answer(CommitMessage(votes_)); + } + return boost::none; +} + +bool YacBlockStorage::isContains(const VoteMessage &msg) const { + return std::count(votes_.begin(), votes_.end(), msg) != 0; +} + +iroha::consensus::yac::YacHash YacBlockStorage::getStorageKey() const { + return storage_key_; +} + +// --------| private api |-------- + +bool YacBlockStorage::uniqueVote(VoteMessage &msg) { + // lookup take O(n) times + return std::all_of( + votes_.begin(), votes_.end(), [&msg](auto vote) { return vote != msg; }); +} + +bool YacBlockStorage::validScheme(VoteMessage &vote) { + return getStorageKey() == vote.hash; +} diff --git a/irohad/consensus/yac/storage/impl/yac_common.cpp b/irohad/consensus/yac/storage/impl/yac_common.cpp index 29f165337e6..f8f3c21f09d 100644 --- a/irohad/consensus/yac/storage/impl/yac_common.cpp +++ b/irohad/consensus/yac/storage/impl/yac_common.cpp @@ -9,36 +9,32 @@ #include "consensus/yac/outcome_messages.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - bool sameKeys(const std::vector &votes) { - if (votes.empty()) { - return false; - } - - auto first = votes.at(0); - return std::all_of( - votes.begin(), votes.end(), [&first](const auto ¤t) { - return first.hash.vote_round == current.hash.vote_round; - }); - } - - boost::optional getKey(const std::vector &votes) { - if (not sameKeys(votes)) { - return boost::none; - } - return votes[0].hash.vote_round; - } - - boost::optional getHash(const std::vector 
&votes) { - if (not sameKeys(votes)) { - return boost::none; - } - - return votes.at(0).hash; - } - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace yac = iroha::consensus::yac; + +bool yac::sameKeys(const std::vector &votes) { + if (votes.empty()) { + return false; + } + + auto first = votes.at(0); + return std::all_of(votes.begin(), votes.end(), [&first](const auto ¤t) { + return first.hash.vote_round == current.hash.vote_round; + }); +} + +boost::optional yac::getKey( + const std::vector &votes) { + if (not sameKeys(votes)) { + return boost::none; + } + return votes[0].hash.vote_round; +} + +boost::optional yac::getHash( + const std::vector &votes) { + if (not sameKeys(votes)) { + return boost::none; + } + + return votes.at(0).hash; +} diff --git a/irohad/consensus/yac/storage/impl/yac_proposal_storage.cpp b/irohad/consensus/yac/storage/impl/yac_proposal_storage.cpp index 8882b88aed0..e7835eb46fb 100644 --- a/irohad/consensus/yac/storage/impl/yac_proposal_storage.cpp +++ b/irohad/consensus/yac/storage/impl/yac_proposal_storage.cpp @@ -9,143 +9,135 @@ #include "logger/logger.hpp" #include "logger/logger_manager.hpp" -using namespace logger; - -namespace iroha { - namespace consensus { - namespace yac { - - // --------| private api |-------- - - auto YacProposalStorage::findStore(const YacHash &store_hash) { - // find exist - auto iter = std::find_if(block_storages_.begin(), - block_storages_.end(), - [&store_hash](auto block_storage) { - auto storage_key = - block_storage.getStorageKey(); - return storage_key == store_hash; - }); - if (iter != block_storages_.end()) { - return iter; - } - // insert and return new - return block_storages_.emplace( - block_storages_.end(), - YacHash(store_hash.vote_round, - store_hash.vote_hashes.proposal_hash, - store_hash.vote_hashes.block_hash), - peers_in_round_, - supermajority_checker_, - log_manager_->getChild("BlockStorage")->getLogger()); - } - - // --------| public api |-------- - - 
YacProposalStorage::YacProposalStorage( - Round store_round, - PeersNumberType peers_in_round, - std::shared_ptr supermajority_checker, - logger::LoggerManagerTreePtr log_manager) - : current_state_(boost::none), - storage_key_(store_round), - peers_in_round_(peers_in_round), - supermajority_checker_(supermajority_checker), - log_manager_(std::move(log_manager)), - log_(log_manager_->getLogger()) {} - - boost::optional YacProposalStorage::insert(VoteMessage msg) { - if (shouldInsert(msg)) { - // insert to block store - - log_->info("Vote with {} and hashes [{}, {}] looks valid", - msg.hash.vote_round, - msg.hash.vote_hashes.proposal_hash, - msg.hash.vote_hashes.block_hash); - - auto iter = findStore(msg.hash); - auto block_state = iter->insert(msg); - - // Single BlockStorage always returns CommitMessage because it - // aggregates votes for a single hash. - if (block_state) { - // supermajority on block achieved - current_state_ = std::move(block_state); - } else { - // try to find reject case - auto reject_state = findRejectProof(); - if (reject_state) { - log_->info("Found reject proof"); - current_state_ = std::move(reject_state); - } - } - } - return getState(); - } - - boost::optional YacProposalStorage::insert( - std::vector messages) { - std::for_each(messages.begin(), messages.end(), [this](auto vote) { - this->insert(std::move(vote)); - }); - return getState(); - } - - const Round &YacProposalStorage::getStorageKey() const { - return storage_key_; - } - - boost::optional YacProposalStorage::getState() const { - return current_state_; - } - - // --------| private api |-------- - - bool YacProposalStorage::shouldInsert(const VoteMessage &msg) { - return checkProposalRound(msg.hash.vote_round) - and checkPeerUniqueness(msg); - } +using iroha::consensus::yac::YacProposalStorage; - bool YacProposalStorage::checkProposalRound(const Round &vote_round) { - return vote_round == storage_key_; - } +// --------| private api |-------- - bool 
YacProposalStorage::checkPeerUniqueness(const VoteMessage &msg) { - return std::all_of(block_storages_.begin(), +auto YacProposalStorage::findStore(const YacHash &store_hash) { + // find exist + auto iter = std::find_if(block_storages_.begin(), block_storages_.end(), - [&msg](YacBlockStorage &storage) { - if (storage.getStorageKey() != msg.hash) { - return true; - } - return not storage.isContains(msg); + [&store_hash](auto block_storage) { + auto storage_key = block_storage.getStorageKey(); + return storage_key == store_hash; }); + if (iter != block_storages_.end()) { + return iter; + } + // insert and return new + return block_storages_.emplace( + block_storages_.end(), + YacHash(store_hash.vote_round, + store_hash.vote_hashes.proposal_hash, + store_hash.vote_hashes.block_hash), + peers_in_round_, + supermajority_checker_, + log_manager_->getChild("BlockStorage")->getLogger()); +} + +// --------| public api |-------- + +YacProposalStorage::YacProposalStorage( + Round store_round, + PeersNumberType peers_in_round, + std::shared_ptr supermajority_checker, + logger::LoggerManagerTreePtr log_manager) + : current_state_(boost::none), + storage_key_(store_round), + peers_in_round_(peers_in_round), + supermajority_checker_(supermajority_checker), + log_manager_(std::move(log_manager)), + log_(log_manager_->getLogger()) {} + +boost::optional YacProposalStorage::insert( + VoteMessage msg) { + if (shouldInsert(msg)) { + // insert to block store + + log_->info("Vote with {} and hashes [{}, {}] looks valid", + msg.hash.vote_round, + msg.hash.vote_hashes.proposal_hash, + msg.hash.vote_hashes.block_hash); + + auto iter = findStore(msg.hash); + auto block_state = iter->insert(msg); + + // Single BlockStorage always returns CommitMessage because it + // aggregates votes for a single hash. 
+ if (block_state) { + // supermajority on block achieved + current_state_ = std::move(block_state); + } else { + // try to find reject case + auto reject_state = findRejectProof(); + if (reject_state) { + log_->info("Found reject proof"); + current_state_ = std::move(reject_state); } - - boost::optional YacProposalStorage::findRejectProof() { - auto is_reject = not supermajority_checker_->canHaveSupermajority( - block_storages_ - | boost::adaptors::transformed([](const auto &storage) { - return storage.getNumberOfVotes(); - }), - peers_in_round_); - - if (is_reject) { - std::vector result; - std::for_each(block_storages_.begin(), - block_storages_.end(), - [&result](auto &storage) { - auto votes_from_block_storage = storage.getVotes(); - std::move(votes_from_block_storage.begin(), - votes_from_block_storage.end(), - std::back_inserter(result)); - }); - - return Answer(RejectMessage(std::move(result))); - } - - return boost::none; - } - - } // namespace yac - } // namespace consensus -} // namespace iroha + } + } + return getState(); +} + +boost::optional YacProposalStorage::insert( + std::vector messages) { + for (auto &vote : messages) { + insert(std::move(vote)); + } + return getState(); +} + +const iroha::consensus::Round &YacProposalStorage::getStorageKey() const { + return storage_key_; +} + +boost::optional YacProposalStorage::getState() + const { + return current_state_; +} + +// --------| private api |-------- + +bool YacProposalStorage::shouldInsert(const VoteMessage &msg) { + return checkProposalRound(msg.hash.vote_round) and checkPeerUniqueness(msg); +} + +bool YacProposalStorage::checkProposalRound(const Round &vote_round) { + return vote_round == storage_key_; +} + +bool YacProposalStorage::checkPeerUniqueness(const VoteMessage &msg) { + return std::all_of(block_storages_.begin(), + block_storages_.end(), + [&msg](YacBlockStorage &storage) { + if (storage.getStorageKey() != msg.hash) { + return true; + } + return not storage.isContains(msg); + }); +} 
+ +boost::optional +YacProposalStorage::findRejectProof() { + auto is_reject = not supermajority_checker_->canHaveSupermajority( + block_storages_ | boost::adaptors::transformed([](const auto &storage) { + return storage.getNumberOfVotes(); + }), + peers_in_round_); + + if (is_reject) { + std::vector result; + std::for_each(block_storages_.begin(), + block_storages_.end(), + [&result](auto &storage) { + auto votes_from_block_storage = storage.getVotes(); + std::move(votes_from_block_storage.begin(), + votes_from_block_storage.end(), + std::back_inserter(result)); + }); + + return Answer(RejectMessage(std::move(result))); + } + + return boost::none; +} diff --git a/irohad/consensus/yac/storage/impl/yac_vote_storage.cpp b/irohad/consensus/yac/storage/impl/yac_vote_storage.cpp index af41165ca83..c03d65c48ce 100644 --- a/irohad/consensus/yac/storage/impl/yac_vote_storage.cpp +++ b/irohad/consensus/yac/storage/impl/yac_vote_storage.cpp @@ -13,142 +13,141 @@ #include "consensus/yac/storage/yac_proposal_storage.hpp" #include "logger/logger_manager.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - // --------| private api |-------- - - namespace { - /** - * Find storage with corresponding key - * @tparam T - storage type - * @param storage - ref or const ref for the storage - * @param round - required round - * @return iterator for the storage - */ - template - auto findStorage(T &storage, const Round &round) { - return std::find_if( - storage.begin(), storage.end(), [&round](const auto &storage) { - return storage.getStorageKey() == round; - }); - } - } // namespace - - auto YacVoteStorage::getProposalStorage(const Round &round) { - return findStorage(proposal_storages_, round); - } - - auto YacVoteStorage::getProposalStorage(const Round &round) const { - return findStorage(proposal_storages_, round); - } - - boost::optional::iterator> - YacVoteStorage::findProposalStorage(const VoteMessage &msg, - PeersNumberType peers_in_round) { - const auto &round 
= msg.hash.vote_round; - auto val = getProposalStorage(round); - if (val != proposal_storages_.end()) { - return val; - } - if (strategy_->shouldCreateRound(round)) { - return proposal_storages_.emplace( - proposal_storages_.end(), - msg.hash.vote_round, - peers_in_round, - supermajority_checker_, - log_manager_->getChild("ProposalStorage")); - } else { - return boost::none; - } - } - - void YacVoteStorage::remove(const iroha::consensus::Round &round) { - auto val = getProposalStorage(round); - if (val != proposal_storages_.end()) { - proposal_storages_.erase(val); - } - auto state = processing_state_.find(round); - if (state != processing_state_.end()) { - processing_state_.erase(state); - } - } - - // --------| public api |-------- - - YacVoteStorage::YacVoteStorage( - std::shared_ptr cleanup_strategy, - std::unique_ptr supermajority_checker, - logger::LoggerManagerTreePtr log_manager) - : strategy_(std::move(cleanup_strategy)), - supermajority_checker_(std::move(supermajority_checker)), - log_manager_(std::move(log_manager)) {} - - boost::optional YacVoteStorage::store( - std::vector state, PeersNumberType peers_in_round) { - if (state.empty()) { - return boost::none; - } - return findProposalStorage(state.at(0), peers_in_round) | - [this, &state](auto &&storage) { - const auto &round = storage->getStorageKey(); - return storage->insert(state) | - [this, &round]( - auto &&insert_outcome) -> boost::optional { - last_round_ = std::max(last_round_.value_or(round), round); - this->strategy_->finalize(round, insert_outcome) | - [this](auto &&remove) { - std::for_each( - remove.begin(), - remove.end(), - [this](const auto &round) { this->remove(round); }); - }; - return insert_outcome; +using iroha::consensus::yac::YacVoteStorage; + +// --------| private api |-------- + +namespace { + /** + * Find storage with corresponding key + * @tparam T - storage type + * @param storage - ref or const ref for the storage + * @param round - required round + * @return iterator for 
the storage + */ + template + auto findStorage(T &storage, const iroha::consensus::Round &round) { + return std::find_if( + storage.begin(), storage.end(), [&round](const auto &storage) { + return storage.getStorageKey() == round; + }); + } +} // namespace + +auto YacVoteStorage::getProposalStorage(const Round &round) { + return findStorage(proposal_storages_, round); +} + +auto YacVoteStorage::getProposalStorage(const Round &round) const { + return findStorage(proposal_storages_, round); +} + +boost::optional< + std::vector::iterator> +YacVoteStorage::findProposalStorage(const VoteMessage &msg, + PeersNumberType peers_in_round) { + const auto &round = msg.hash.vote_round; + auto val = getProposalStorage(round); + if (val != proposal_storages_.end()) { + return val; + } + if (strategy_->shouldCreateRound(round)) { + return proposal_storages_.emplace( + proposal_storages_.end(), + msg.hash.vote_round, + peers_in_round, + supermajority_checker_, + log_manager_->getChild("ProposalStorage")); + } else { + return boost::none; + } +} + +void YacVoteStorage::remove(const iroha::consensus::Round &round) { + auto val = getProposalStorage(round); + if (val != proposal_storages_.end()) { + proposal_storages_.erase(val); + } + auto state = processing_state_.find(round); + if (state != processing_state_.end()) { + processing_state_.erase(state); + } +} + +// --------| public api |-------- + +YacVoteStorage::YacVoteStorage( + std::shared_ptr cleanup_strategy, + std::unique_ptr supermajority_checker, + logger::LoggerManagerTreePtr log_manager) + : strategy_(std::move(cleanup_strategy)), + supermajority_checker_(std::move(supermajority_checker)), + log_manager_(std::move(log_manager)) {} + +boost::optional YacVoteStorage::store( + std::vector state, PeersNumberType peers_in_round) { + if (state.empty()) { + return boost::none; + } + return findProposalStorage(state.at(0), peers_in_round) | + [this, &state](auto &&storage) { + const auto &round = storage->getStorageKey(); + return 
storage->insert(state) | + [this, + &round](auto &&insert_outcome) -> boost::optional { + last_round_ = std::max(last_round_.value_or(round), round); + this->strategy_->finalize(round, insert_outcome) | + [this](auto &&to_remove) { + for (auto const &round : to_remove) { + remove(round); + } }; - }; - } - - bool YacVoteStorage::isCommitted(const Round &round) { - auto iter = getProposalStorage(round); - if (iter == proposal_storages_.end()) { - return false; - } - return bool(iter->getState()); - } - - ProposalState YacVoteStorage::getProcessingState(const Round &round) { - return processing_state_[round]; - } - - void YacVoteStorage::nextProcessingState(const Round &round) { - auto &val = processing_state_[round]; - switch (val) { - case ProposalState::kNotSentNotProcessed: - val = ProposalState::kSentNotProcessed; - break; - case ProposalState::kSentNotProcessed: - val = ProposalState::kSentProcessed; - break; - case ProposalState::kSentProcessed: - break; - } - } - - boost::optional YacVoteStorage::getLastFinalizedRound() const { - return last_round_; - } - - boost::optional YacVoteStorage::getState( - const Round &round) const { - auto proposal_storage = getProposalStorage(round); - if (proposal_storage != proposal_storages_.end()) { - return proposal_storage->getState(); - } else { - return boost::none; - } - } - - } // namespace yac - } // namespace consensus -} // namespace iroha + return insert_outcome; + }; + }; +} + +bool YacVoteStorage::isCommitted(const Round &round) { + auto iter = getProposalStorage(round); + if (iter == proposal_storages_.end()) { + if (auto last_round = getLastFinalizedRound()) { + return *last_round >= round; + } + return false; + } + return bool(iter->getState()); +} + +iroha::consensus::yac::ProposalState YacVoteStorage::getProcessingState( + const Round &round) { + return processing_state_[round]; +} + +void YacVoteStorage::nextProcessingState(const Round &round) { + auto &val = processing_state_[round]; + switch (val) { + case 
ProposalState::kNotSentNotProcessed: + val = ProposalState::kSentNotProcessed; + break; + case ProposalState::kSentNotProcessed: + val = ProposalState::kSentProcessed; + break; + case ProposalState::kSentProcessed: + break; + } +} + +boost::optional YacVoteStorage::getLastFinalizedRound() + const { + return last_round_; +} + +boost::optional YacVoteStorage::getState( + const Round &round) const { + auto proposal_storage = getProposalStorage(round); + if (proposal_storage != proposal_storages_.end()) { + return proposal_storage->getState(); + } else { + return boost::none; + } +} diff --git a/irohad/consensus/yac/storage/storage_result.hpp b/irohad/consensus/yac/storage/storage_result.hpp index f5dcc090775..61fa06a0d1e 100644 --- a/irohad/consensus/yac/storage/storage_result.hpp +++ b/irohad/consensus/yac/storage/storage_result.hpp @@ -8,21 +8,16 @@ #include -namespace iroha { - namespace consensus { - namespace yac { +namespace iroha::consensus::yac { + struct CommitMessage; + struct RejectMessage; + struct FutureMessage; - struct CommitMessage; - struct RejectMessage; - struct FutureMessage; + /** + * Contains proof of supermajority for all purposes; + */ + using Answer = boost::variant; - /** - * Contains proof of supermajority for all purposes; - */ - using Answer = - boost::variant; +} // namespace iroha::consensus::yac - } // namespace yac - } // namespace consensus -} // namespace iroha #endif // IROHA_STORAGE_RESULT_HPP diff --git a/irohad/consensus/yac/storage/yac_block_storage.hpp b/irohad/consensus/yac/storage/yac_block_storage.hpp index d45a03b97e7..17b98cff235 100644 --- a/irohad/consensus/yac/storage/yac_block_storage.hpp +++ b/irohad/consensus/yac/storage/yac_block_storage.hpp @@ -16,112 +16,107 @@ #include "consensus/yac/yac_types.hpp" #include "logger/logger_fwd.hpp" -namespace iroha { - namespace consensus { - namespace yac { - /** - * Class provide storage of votes for one block. 
- */ - class YacBlockStorage { - private: - // --------| fields |-------- - - /** - * All votes stored in block store - */ - std::vector votes_; - - public: - YacBlockStorage( - YacHash hash, - PeersNumberType peers_in_round, - std::shared_ptr supermajority_checker, - logger::LoggerPtr log); - - /** - * Try to insert vote to storage - * @param msg - vote for insertion - * @return actual state of storage, - * boost::none when storage doesn't have supermajority - */ - boost::optional insert(VoteMessage msg); - - /** - * Insert vector of votes to current storage - * @param votes - bunch of votes for insertion - * @return state of storage after insertion last vote, - * boost::none when storage doesn't have supermajority - */ - boost::optional insert(std::vector votes); - - /** - * @return votes attached to storage - */ - std::vector getVotes() const; - - /** - * @return number of votes attached to storage - */ - size_t getNumberOfVotes() const; - - /** - * @return current block store state - */ - boost::optional getState(); - - /** - * Verify that passed vote contains in storage - * @param msg - vote for finding - * @return true, if contains - */ - bool isContains(const VoteMessage &msg) const; - - /** - * Provide key attached to this storage - */ - YacHash getStorageKey() const; - - private: - // --------| private api |-------- - - /** - * Verify uniqueness of vote in storage - * @param msg - vote for verification - * @return true if vote doesn't appear in storage - */ - bool uniqueVote(VoteMessage &vote); - - /** - * Verify that vote has the same hash attached as the storage - * @param vote - vote to be checked - * @return true, if validation passed - */ - bool validScheme(VoteMessage &vote); - - // --------| fields |-------- - - /** - * Key of the storage; currently it's yac hash - */ - YacHash storage_key_; - - /** - * Number of peers in current round - */ - PeersNumberType peers_in_round_; - - /** - * Provide functions to check supermajority - */ - std::shared_ptr 
supermajority_checker_; - - /** - * Storage logger - */ - logger::LoggerPtr log_; - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + /** + * Class provide storage of votes for one block. + */ + class YacBlockStorage { + private: + // --------| fields |-------- + + /** + * All votes stored in block store + */ + std::vector votes_; + + public: + YacBlockStorage(YacHash hash, + PeersNumberType peers_in_round, + std::shared_ptr supermajority_checker, + logger::LoggerPtr log); + + /** + * Try to insert vote to storage + * @param msg - vote for insertion + * @return actual state of storage, + * boost::none when storage doesn't have supermajority + */ + boost::optional insert(VoteMessage msg); + + /** + * Insert vector of votes to current storage + * @param votes - bunch of votes for insertion + * @return state of storage after insertion last vote, + * boost::none when storage doesn't have supermajority + */ + boost::optional insert(std::vector votes); + + /** + * @return votes attached to storage + */ + std::vector getVotes() const; + + /** + * @return number of votes attached to storage + */ + size_t getNumberOfVotes() const; + + /** + * @return current block store state + */ + boost::optional getState(); + + /** + * Verify that passed vote contains in storage + * @param msg - vote for finding + * @return true, if contains + */ + bool isContains(const VoteMessage &msg) const; + + /** + * Provide key attached to this storage + */ + YacHash getStorageKey() const; + + private: + // --------| private api |-------- + + /** + * Verify uniqueness of vote in storage + * @param msg - vote for verification + * @return true if vote doesn't appear in storage + */ + bool uniqueVote(VoteMessage &vote); + + /** + * Verify that vote has the same hash attached as the storage + * @param vote - vote to be checked + * @return true, if validation passed + */ + bool validScheme(VoteMessage &vote); + + // --------| fields 
|-------- + + /** + * Key of the storage; currently it's yac hash + */ + YacHash storage_key_; + + /** + * Number of peers in current round + */ + PeersNumberType peers_in_round_; + + /** + * Provide functions to check supermajority + */ + std::shared_ptr supermajority_checker_; + + /** + * Storage logger + */ + logger::LoggerPtr log_; + }; +} // namespace iroha::consensus::yac + #endif // IROHA_YAC_BLOCK_VOTE_STORAGE_HPP diff --git a/irohad/consensus/yac/storage/yac_common.hpp b/irohad/consensus/yac/storage/yac_common.hpp index b26d6304476..2f90ae13788 100644 --- a/irohad/consensus/yac/storage/yac_common.hpp +++ b/irohad/consensus/yac/storage/yac_common.hpp @@ -12,42 +12,37 @@ #include "consensus/round.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - class YacHash; - struct VoteMessage; - - using ProposalHash = std::string; - - using BlockHash = std::string; - - /** - * Check that all votes in collection have the same key - * @param votes - collection of votes - * @return true, if rounds of those votes are the same - */ - bool sameKeys(const std::vector &votes); - - /** - * Provide key common for whole collection - * @param votes - collection with votes - * @return vote round, if collection shared the same round, - * otherwise boost::none - */ - boost::optional getKey(const std::vector &votes); - - /** - * Get common hash from collection - * @param votes - collection with votes - * @return hash, if collection elements have same hash, - * otherwise boost::none - */ - boost::optional getHash(const std::vector &votes); - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + class YacHash; + struct VoteMessage; + + using ProposalHash = std::string; + + using BlockHash = std::string; + + /** + * Check that all votes in collection have the same key + * @param votes - collection of votes + * @return true, if rounds of those votes are the same + */ + bool sameKeys(const std::vector &votes); + + /** 
+ * Provide key common for whole collection + * @param votes - collection with votes + * @return vote round, if collection shared the same round, + * otherwise boost::none + */ + boost::optional getKey(const std::vector &votes); + + /** + * Get common hash from collection + * @param votes - collection with votes + * @return hash, if collection elements have same hash, + * otherwise boost::none + */ + boost::optional getHash(const std::vector &votes); + +} // namespace iroha::consensus::yac #endif // IROHA_YAC_COMMON_HPP diff --git a/irohad/consensus/yac/storage/yac_proposal_storage.hpp b/irohad/consensus/yac/storage/yac_proposal_storage.hpp index 359954ebee3..cc49b1dfbf4 100644 --- a/irohad/consensus/yac/storage/yac_proposal_storage.hpp +++ b/irohad/consensus/yac/storage/yac_proposal_storage.hpp @@ -18,133 +18,129 @@ #include "logger/logger_fwd.hpp" #include "logger/logger_manager_fwd.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - struct VoteMessage; - - /** - * Class for storing votes related to given proposal/block round - * and gain information about commits/rejects for this round - */ - class YacProposalStorage { - private: - // --------| private api |-------- - - /** - * Find block index with provided parameters, - * if those store absent - create new - * @param store_hash - hash of store of interest - * @return iterator to storage - */ - auto findStore(const YacHash &store_hash); - - public: - // --------| public api |-------- - - YacProposalStorage( - Round store_round, - PeersNumberType peers_in_round, - std::shared_ptr supermajority_checker, - logger::LoggerManagerTreePtr log_manager); - - /** - * Try to insert vote to storage - * @param vote - object for insertion - * @return result, that contains actual state of storage. - * boost::none if not inserted, possible reasons - duplication, - * wrong proposal/block round. 
- */ - boost::optional insert(VoteMessage vote); - - /** - * Insert bundle of messages into storage - * @param messages - collection of messages - * @return result, that contains actual state of storage, - * after insertion of all votes. - */ - boost::optional insert(std::vector messages); - - /** - * Provides key for storage - */ - const Round &getStorageKey() const; - - /** - * @return current state of storage - */ - boost::optional getState() const; - - private: - // --------| private api |-------- - - /** - * Possible to insert vote - * @param msg - vote for insertion - * @return true if possible - */ - bool shouldInsert(const VoteMessage &msg); - - /** - * Is this vote valid for insertion in proposal storage - * @param vote_round - round for verification - * @return true if it may be applied - */ - bool checkProposalRound(const Round &vote_round); - - /** - * Is this peer first time appear in this proposal storage - * @return true, if peer unique - */ - bool checkPeerUniqueness(const VoteMessage &msg); - - /** - * Method try to find proof of reject. 
- * This computes as - * number of not voted peers + most frequent vote count < supermajority - * @return answer with proof - */ - boost::optional findRejectProof(); - - // --------| fields |-------- - - /** - * Current state of storage - */ - boost::optional current_state_; - - /** - * Vector of block storages based on this proposal - */ - std::vector block_storages_; - - /** - * Key of the storage - */ - Round storage_key_; - - /** - * Provide number of peers participated in current round - */ - PeersNumberType peers_in_round_; - - /** - * Provide functions to check supermajority - */ - std::shared_ptr supermajority_checker_; - - /** - * Storage logger manager - */ - logger::LoggerManagerTreePtr log_manager_; - - /** - * Storage logger - */ - logger::LoggerPtr log_; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + struct VoteMessage; + + /** + * Class for storing votes related to given proposal/block round + * and gain information about commits/rejects for this round + */ + class YacProposalStorage { + private: + // --------| private api |-------- + + /** + * Find block index with provided parameters, + * if those store absent - create new + * @param store_hash - hash of store of interest + * @return iterator to storage + */ + auto findStore(const YacHash &store_hash); + + public: + // --------| public api |-------- + + YacProposalStorage( + Round store_round, + PeersNumberType peers_in_round, + std::shared_ptr supermajority_checker, + logger::LoggerManagerTreePtr log_manager); + + /** + * Try to insert vote to storage + * @param vote - object for insertion + * @return result, that contains actual state of storage. + * boost::none if not inserted, possible reasons - duplication, + * wrong proposal/block round. 
+ */ + boost::optional insert(VoteMessage vote); + + /** + * Insert bundle of messages into storage + * @param messages - collection of messages + * @return result, that contains actual state of storage, + * after insertion of all votes. + */ + boost::optional insert(std::vector messages); + + /** + * Provides key for storage + */ + const Round &getStorageKey() const; + + /** + * @return current state of storage + */ + boost::optional getState() const; + + private: + // --------| private api |-------- + + /** + * Possible to insert vote + * @param msg - vote for insertion + * @return true if possible + */ + bool shouldInsert(const VoteMessage &msg); + + /** + * Is this vote valid for insertion in proposal storage + * @param vote_round - round for verification + * @return true if it may be applied + */ + bool checkProposalRound(const Round &vote_round); + + /** + * Is this peer first time appear in this proposal storage + * @return true, if peer unique + */ + bool checkPeerUniqueness(const VoteMessage &msg); + + /** + * Method try to find proof of reject. 
+ * This computes as + * number of not voted peers + most frequent vote count < supermajority + * @return answer with proof + */ + boost::optional findRejectProof(); + + // --------| fields |-------- + + /** + * Current state of storage + */ + boost::optional current_state_; + + /** + * Vector of block storages based on this proposal + */ + std::vector block_storages_; + + /** + * Key of the storage + */ + Round storage_key_; + + /** + * Provide number of peers participated in current round + */ + PeersNumberType peers_in_round_; + + /** + * Provide functions to check supermajority + */ + std::shared_ptr supermajority_checker_; + + /** + * Storage logger manager + */ + logger::LoggerManagerTreePtr log_manager_; + + /** + * Storage logger + */ + logger::LoggerPtr log_; + }; +} // namespace iroha::consensus::yac + #endif // IROHA_YAC_PROPOSAL_STORAGE_HPP diff --git a/irohad/consensus/yac/storage/yac_vote_storage.hpp b/irohad/consensus/yac/storage/yac_vote_storage.hpp index fbaee797db9..d0555cb78c8 100644 --- a/irohad/consensus/yac/storage/yac_vote_storage.hpp +++ b/irohad/consensus/yac/storage/yac_vote_storage.hpp @@ -21,170 +21,161 @@ #include "consensus/yac/yac_types.hpp" #include "logger/logger_manager_fwd.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - /** - * Proposal outcome states for multicast propagation strategy - * - * Outcome is either CommitMessage, which guarantees that supermajority of - * votes for the proposal-block hashes is collected, or RejectMessage, - * which states that supermajority of votes for a block hash cannot be - * achieved - * - * kNotSentNotProcessed - outcome was not propagated in the network - * AND it was not passed to pipeline. Initial state after receiving an - * outcome from storage. Outcome with votes is propagated to the network - * in this state. - * - * kSentNotProcessed - outcome was propagated in the network - * AND it was not passed to pipeline. State can be set in two cases: - * 1. 
Outcome is received from the network. Some node has already achieved - * an outcome and has propagated it to the network, so the first state is - * skipped. - * 2. Outcome was propagated to the network - * Outcome is passed to pipeline in this state. - * - * kSentProcessed - outcome was propagated in the network - * AND it was passed to pipeline. Set after passing proposal to pipeline. - * This state is final. Receiving a network message in this state results - * in direct propagation of outcome to message sender. - */ - enum class ProposalState { - kNotSentNotProcessed, - kSentNotProcessed, - kSentProcessed - }; - - /** - * Class provide storage for votes and useful methods for it. - */ - class YacVoteStorage { - private: - // --------| private api |-------- - - /** - * Retrieve iterator for storage with specified key - * @param round - key of that storage - * @return iterator to proposal storage - */ - auto getProposalStorage(const Round &round); - auto getProposalStorage(const Round &round) const; - - /** - * Find existed proposal storage or create new if required - * @param msg - vote for finding - * @param peers_in_round - number of peer required - * for verify supermajority; - * This parameter used on creation of proposal storage - * @return - iter for required proposal storage - */ - boost::optional::iterator> - findProposalStorage(const VoteMessage &msg, - PeersNumberType peers_in_round); - - /** - * Remove proposal storage by round - */ - void remove(const Round &round); - - public: - // --------| public api |-------- - - /** - * @param cleanup_strategy - strategy for removing elements from storage - * @param consistency_model - consensus consistency model (CFT, BFT). 
- * @param log_manager - log manager to create component loggers - */ - YacVoteStorage( - std::shared_ptr cleanup_strategy, - std::unique_ptr supermajority_checker, - logger::LoggerManagerTreePtr log_manager); - - /** - * Insert votes in storage - * @param state - current message with votes - * @param peers_in_round - number of peers participated in round - * @return structure with result of inserting. - * boost::none if msg not valid. - */ - boost::optional store(std::vector state, - PeersNumberType peers_in_round); - - /** - * Provide status about closing round of proposal/block - * @param round, in which proposal/block is supposed to be committed - * @return true, if round closed - */ - bool isCommitted(const Round &round); - - /** - * Method provide state of processing for concrete proposal/block - * @param round, in which that proposal/block is being voted - * @return value attached to parameter's round. Default is - * kNotSentNotProcessed. - */ - ProposalState getProcessingState(const Round &round); - - /** - * Mark round with following transition: - * kNotSentNotProcessed -> kSentNotProcessed - * kSentNotProcessed -> kSentProcessed - * kSentProcessed -> kSentProcessed - * @see ProposalState description for transition cases - * @param round - target tag - */ - void nextProcessingState(const Round &round); - - /** - * Get last by order finalized round - * @return round if it exists - */ - boost::optional getLastFinalizedRound() const; - - /** - * Get the state attached of a past round - * @param round - required round - * @return state if round exists and finalized - */ - boost::optional getState(const Round &round) const; - - private: - // --------| fields |-------- - - // TODO: 2019-02-28 @muratovv refactor proposal_storages_ & - // processing_state_ with separate entity IR-360 - - /** - * Active proposal storages - */ - std::vector proposal_storages_; - - /** - * Processing set provide user flags about processing some - * proposals/blocks. 
- * If such round exists <=> processed - */ - std::unordered_map - processing_state_; - - /** - * Provides strategy managing rounds (adding and removing) for the - * storage - */ - std::shared_ptr strategy_; - - /// last finalized round - boost::optional last_round_; - - std::shared_ptr supermajority_checker_; - - logger::LoggerManagerTreePtr log_manager_; - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + /** + * Proposal outcome states for multicast propagation strategy + * + * Outcome is either CommitMessage, which guarantees that supermajority of + * votes for the proposal-block hashes is collected, or RejectMessage, + * which states that supermajority of votes for a block hash cannot be + * achieved + * + * kNotSentNotProcessed - outcome was not propagated in the network + * AND it was not passed to pipeline. Initial state after receiving an + * outcome from storage. Outcome with votes is propagated to the network + * in this state. + * + * kSentNotProcessed - outcome was propagated in the network + * AND it was not passed to pipeline. State can be set in two cases: + * 1. Outcome is received from the network. Some node has already achieved + * an outcome and has propagated it to the network, so the first state is + * skipped. + * 2. Outcome was propagated to the network + * Outcome is passed to pipeline in this state. + * + * kSentProcessed - outcome was propagated in the network + * AND it was passed to pipeline. Set after passing proposal to pipeline. + * This state is final. Receiving a network message in this state results + * in direct propagation of outcome to message sender. + */ + enum class ProposalState { + kNotSentNotProcessed, + kSentNotProcessed, + kSentProcessed + }; + + /** + * Class provide storage for votes and useful methods for it. 
+ */ + class YacVoteStorage { + private: + // --------| private api |-------- + + /** + * Retrieve iterator for storage with specified key + * @param round - key of that storage + * @return iterator to proposal storage + */ + auto getProposalStorage(const Round &round); + auto getProposalStorage(const Round &round) const; + + /** + * Find existed proposal storage or create new if required + * @param msg - vote for finding + * @param peers_in_round - number of peer required + * for verify supermajority; + * This parameter used on creation of proposal storage + * @return - iter for required proposal storage + */ + boost::optional::iterator> + findProposalStorage(const VoteMessage &msg, PeersNumberType peers_in_round); + + public: + // --------| public api |-------- + + /** + * @param cleanup_strategy - strategy for removing elements from storage + * @param consistency_model - consensus consistency model (CFT, BFT). + * @param log_manager - log manager to create component loggers + */ + YacVoteStorage(std::shared_ptr cleanup_strategy, + std::unique_ptr supermajority_checker, + logger::LoggerManagerTreePtr log_manager); + + /** + * Insert votes in storage + * @param state - current message with votes + * @param peers_in_round - number of peers participated in round + * @return structure with result of inserting. + * boost::none if msg not valid. + */ + boost::optional store(std::vector state, + PeersNumberType peers_in_round); + + /** + * Provide status about closing round of proposal/block + * @param round, in which proposal/block is supposed to be committed + * @return true, if round closed + */ + bool isCommitted(const Round &round); + + /** + * Remove proposal storage by round + */ + void remove(const Round &round); + + /** + * Method provide state of processing for concrete proposal/block + * @param round, in which that proposal/block is being voted + * @return value attached to parameter's round. Default is + * kNotSentNotProcessed. 
+ */ + ProposalState getProcessingState(const Round &round); + + /** + * Mark round with following transition: + * kNotSentNotProcessed -> kSentNotProcessed + * kSentNotProcessed -> kSentProcessed + * kSentProcessed -> kSentProcessed + * @see ProposalState description for transition cases + * @param round - target tag + */ + void nextProcessingState(const Round &round); + + /** + * Get last by order finalized round + * @return round if it exists + */ + boost::optional getLastFinalizedRound() const; + + /** + * Get the state attached to a past round + * @param round - required round + * @return state if round exists and finalized + */ + boost::optional getState(const Round &round) const; + + private: + // --------| fields |-------- + + // TODO: 2019-02-28 @muratovv refactor proposal_storages_ & + // processing_state_ with separate entity IR-360 + + /** + * Active proposal storages + */ + std::vector proposal_storages_; + + /** + * Processing set provides user flags about processing some + * proposals/blocks. 
+ * If such round exists <=> processed + */ + std::unordered_map processing_state_; + + /** + * Provides strategy managing rounds (adding and removing) for the + * storage + */ + std::shared_ptr strategy_; + + /// last finalized round + boost::optional last_round_; + + std::shared_ptr supermajority_checker_; + + logger::LoggerManagerTreePtr log_manager_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_VOTE_STORAGE_HPP diff --git a/irohad/consensus/yac/supermajority_checker.hpp b/irohad/consensus/yac/supermajority_checker.hpp index d2d98b1f1b1..909ccbdb52e 100644 --- a/irohad/consensus/yac/supermajority_checker.hpp +++ b/irohad/consensus/yac/supermajority_checker.hpp @@ -15,65 +15,57 @@ #include "interfaces/common_objects/range_types.hpp" #include "interfaces/common_objects/types.hpp" -namespace shared_model { - namespace interface { - class Peer; - } -} // namespace shared_model +namespace shared_model::interface { + class Peer; +} // namespace shared_model::interface -namespace iroha { - namespace consensus { - namespace yac { +namespace iroha::consensus::yac { + /** + * Interface is responsible for checking if supermajority is achieved + */ + class SupermajorityChecker { + public: + using VoteGroups = boost::any_range; - /** - * Interface is responsible for checking if supermajority is achieved - */ - class SupermajorityChecker { - public: - using VoteGroups = boost::any_range; + virtual ~SupermajorityChecker() = default; - virtual ~SupermajorityChecker() = default; + /** + * Check if supermajority is achieved + * @param current actual number of signatures + * @param all number of peers + * @return true if supermajority is possible or false otherwise + */ + virtual bool hasSupermajority(PeersNumberType current, + PeersNumberType all) const = 0; - /** - * Check if supermajority is achieved - * @param current actual number of signatures - * @param all number of peers - * @return true if supermajority is possible or false otherwise - */ - virtual bool 
hasSupermajority(PeersNumberType current, - PeersNumberType all) const = 0; - - /** - * Check tolerance condition - * @param number - voted peers - * @param all - number of all peers in network - * @return true if the given number of peers is tolerated by the network - */ - virtual bool isTolerated(PeersNumberType number, - PeersNumberType all) const = 0; + /** + * Check tolerance condition + * @param number - voted peers + * @param all - number of all peers in network + * @return true if the given number of peers is tolerated by the network + */ + virtual bool isTolerated(PeersNumberType number, + PeersNumberType all) const = 0; - /** - * Check if supermajority is possible - * @param voted - numbers of peers voted for each option - * @param all - number of peers in round - * @return true, if reject - */ - virtual bool canHaveSupermajority(const VoteGroups &votes, - PeersNumberType all) const = 0; - }; - - /// Get a SupermajorityChecker for the given consistency model. - std::unique_ptr getSupermajorityChecker( - ConsistencyModel c); + /** + * Check if supermajority is possible + * @param voted - numbers of peers voted for each option + * @param all - number of peers in round + * @return true, if reject + */ + virtual bool canHaveSupermajority(const VoteGroups &votes, + PeersNumberType all) const = 0; + }; - } // namespace yac - } // namespace consensus -} // namespace iroha + /// Get a SupermajorityChecker for the given consistency model. 
+ std::unique_ptr getSupermajorityChecker( + ConsistencyModel c); +} // namespace iroha::consensus::yac #endif // IROHA_CONSENSUS_SUPERMAJORITY_CHECKER_HPP diff --git a/irohad/consensus/yac/timer.hpp b/irohad/consensus/yac/timer.hpp index 864d07e87f0..2d0ca618879 100644 --- a/irohad/consensus/yac/timer.hpp +++ b/irohad/consensus/yac/timer.hpp @@ -8,29 +8,20 @@ #include -namespace iroha { - namespace consensus { - namespace yac { +namespace iroha::consensus::yac { + /** + * Interface provides a timer for yac implementation + */ + class Timer { + public: + /** + * Invoke handler with class-specific strategy + * @param handler - function, that will be invoked + */ + virtual void invokeAfterDelay(std::function handler) = 0; - /** - * Interface provide timer for yac implementation - */ - class Timer { - public: - /** - * Invoke handler with class-specific strategy - * @param handler - function, that will be invoked - */ - virtual void invokeAfterDelay(std::function handler) = 0; + virtual ~Timer() = default; + }; +} // namespace iroha::consensus::yac - /** - * Stop timer - */ - virtual void deny() = 0; - - virtual ~Timer() = default; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha #endif // IROHA_YAC_TIMER_HPP diff --git a/irohad/consensus/yac/transport/impl/consensus_service_impl.cpp b/irohad/consensus/yac/transport/impl/consensus_service_impl.cpp new file mode 100644 index 00000000000..c828be33bf6 --- /dev/null +++ b/irohad/consensus/yac/transport/impl/consensus_service_impl.cpp @@ -0,0 +1,39 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "consensus/yac/transport/impl/consensus_service_impl.hpp" + +#include "consensus/yac/transport/yac_pb_converters.hpp" + +using iroha::consensus::yac::ServiceImpl; + +ServiceImpl::ServiceImpl(logger::LoggerPtr log, + std::function)> callback) + : callback_(std::move(callback)), log_(std::move(log)) {} + +grpc::Status ServiceImpl::SendState( + ::grpc::ServerContext *context, + const ::iroha::consensus::yac::proto::State *request, + ::google::protobuf::Empty *response) { + std::vector state; + for (const auto &pb_vote : request->votes()) { + if (auto vote = PbConverters::deserializeVote(pb_vote, log_)) { + state.push_back(*vote); + } + } + if (state.empty()) { + log_->info("Received an empty votes collection"); + return grpc::Status::CANCELLED; + } + if (not sameKeys(state)) { + log_->info("Votes are statelessly invalid: proposal rounds are different"); + return grpc::Status::CANCELLED; + } + + log_->info("Received votes[size={}] from {}", state.size(), context->peer()); + + callback_(std::move(state)); + return grpc::Status::OK; +} diff --git a/irohad/consensus/yac/transport/impl/consensus_service_impl.hpp b/irohad/consensus/yac/transport/impl/consensus_service_impl.hpp new file mode 100644 index 00000000000..936e22a282a --- /dev/null +++ b/irohad/consensus/yac/transport/impl/consensus_service_impl.hpp @@ -0,0 +1,44 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_YAC_SERVICE_IMPL_HPP +#define IROHA_YAC_SERVICE_IMPL_HPP + +#include "yac.grpc.pb.h" + +#include + +#include "consensus/yac/vote_message.hpp" +#include "logger/logger_fwd.hpp" + +namespace iroha::consensus::yac { + /** + * Class which provides implementation of server-side transport for + * consensus based on grpc + */ + class ServiceImpl : public proto::Yac::Service { + public: + using Service = proto::Yac; + + ServiceImpl(logger::LoggerPtr log, + std::function)> callback); + + /** + * Receive votes from another peer; + * Naming is confusing, because this is an rpc call that + * is performed on another machine; + */ + grpc::Status SendState(::grpc::ServerContext *context, + const ::iroha::consensus::yac::proto::State *request, + ::google::protobuf::Empty *response) override; + + private: + std::function)> callback_; + + logger::LoggerPtr log_; + }; +} // namespace iroha::consensus::yac + +#endif // IROHA_YAC_SERVICE_IMPL_HPP diff --git a/irohad/consensus/yac/transport/impl/network_impl.cpp b/irohad/consensus/yac/transport/impl/network_impl.cpp index f74aa59d10f..e7226b90129 100644 --- a/irohad/consensus/yac/transport/impl/network_impl.cpp +++ b/irohad/consensus/yac/transport/impl/network_impl.cpp @@ -14,96 +14,75 @@ #include "consensus/yac/vote_message.hpp" #include "interfaces/common_objects/peer.hpp" #include "logger/logger.hpp" +#include "main/subscription.hpp" #include "network/impl/client_factory.hpp" #include "yac.pb.h" -namespace iroha { - namespace consensus { - namespace yac { - // ----------| Public API |---------- +using iroha::consensus::yac::NetworkImpl; - NetworkImpl::NetworkImpl( - std::shared_ptr> - async_call, - std::unique_ptr client_factory, - logger::LoggerPtr log) - : async_call_(async_call), - client_factory_(std::move(client_factory)), - log_(std::move(log)) {} +// ----------| Public API |---------- +NetworkImpl::NetworkImpl(std::unique_ptr client_factory, + logger::LoggerPtr log) + : 
client_factory_(std::move(client_factory)), log_(std::move(log)) {} - void NetworkImpl::subscribe( - std::shared_ptr handler) { - handler_ = handler; - } +void NetworkImpl::stop() { + std::lock_guard stop_lock(stop_mutex_); + stop_requested_ = true; +} - void NetworkImpl::stop() { - std::lock_guard stop_lock(stop_mutex_); - stop_requested_ = true; - } +void NetworkImpl::sendState(const shared_model::interface::Peer &to, + const std::vector &state) { + std::lock_guard stop_lock(stop_mutex_); + if (stop_requested_) { + log_->warn("Not sending state to {} because stop was requested.", to); + return; + } - void NetworkImpl::sendState(const shared_model::interface::Peer &to, - const std::vector &state) { - std::lock_guard stop_lock(stop_mutex_); - if (stop_requested_) { - log_->warn("Not sending state to {} because stop was requested.", to); - return; - } - - proto::State request; - for (const auto &vote : state) { - auto pb_vote = request.add_votes(); - *pb_vote = PbConverters::serializeVote(vote); - } + proto::State request; + for (const auto &vote : state) { + auto pb_vote = request.add_votes(); + *pb_vote = PbConverters::serializeVote(vote); + } - client_factory_->createClient(to).match( - [&](auto client) { - async_call_->Call( - [client = std::move(client.value), - request = std::move(request), - log = log_, - log_sending_msg = fmt::format( - "Send votes bundle[size={}] to {}", state.size(), to)]( - auto context, auto cq) { - log->info(log_sending_msg); - return client->AsyncSendState(context, request, cq); - }); - }, - [&](const auto &error) { - log_->error("Could not send state to {}: {}", to, error.error); - }); - } + auto maybe_client = client_factory_->createClient(to); + if (expected::hasError(maybe_client)) { + log_->error( + "Could not send state to {}: {}", to, maybe_client.assumeError()); + return; + } + std::shared_ptr client = + std::move(maybe_client).assumeValue(); - grpc::Status NetworkImpl::SendState( - ::grpc::ServerContext *context, - const 
::iroha::consensus::yac::proto::State *request, - ::google::protobuf::Empty *response) { - std::vector state; - for (const auto &pb_vote : request->votes()) { - if (auto vote = PbConverters::deserializeVote(pb_vote, log_)) { - state.push_back(*vote); - } - } - if (state.empty()) { - log_->info("Received an empty votes collection"); - return grpc::Status::CANCELLED; - } - if (not sameKeys(state)) { - log_->info( - "Votes are statelessly invalid: proposal rounds are different"); - return grpc::Status::CANCELLED; + log_->debug("Propagating votes for {}, size={} to {}", + state.front().hash.vote_round, + state.size(), + to); + getSubscription()->dispatcher()->add( + getSubscription()->dispatcher()->kExecuteInPool, + [request(std::move(request)), + client(std::move(client)), + log(utils::make_weak(log_)), + log_sending_msg(fmt::format("Send votes bundle[size={}] for {} to {}", + state.size(), + state.front().hash.vote_round, + to))] { + auto maybe_log = log.lock(); + if (not maybe_log) { + return; } - - log_->info( - "Received votes[size={}] from {}", state.size(), context->peer()); - - if (auto notifications = handler_.lock()) { - notifications->onState(std::move(state)); + grpc::ClientContext context; + context.set_wait_for_ready(true); + context.set_deadline(std::chrono::system_clock::now() + + std::chrono::seconds(5)); + google::protobuf::Empty response; + maybe_log->info(log_sending_msg); + auto status = client->SendState(&context, request, &response); + if (not status.ok()) { + maybe_log->warn( + "RPC failed: {} {}", context.peer(), status.error_message()); + return; } else { - log_->error("Unable to lock the subscriber"); + maybe_log->info("RPC succeeded: {}", context.peer()); } - return grpc::Status::OK; - } - - } // namespace yac - } // namespace consensus -} // namespace iroha + }); +} diff --git a/irohad/consensus/yac/transport/impl/network_impl.hpp b/irohad/consensus/yac/transport/impl/network_impl.hpp index bf89b42335b..bb5432f1293 100644 --- 
a/irohad/consensus/yac/transport/impl/network_impl.hpp +++ b/irohad/consensus/yac/transport/impl/network_impl.hpp @@ -11,79 +11,41 @@ #include #include -#include -#include "consensus/yac/outcome_messages.hpp" #include "consensus/yac/vote_message.hpp" -#include "interfaces/common_objects/peer.hpp" -#include "interfaces/common_objects/types.hpp" #include "logger/logger_fwd.hpp" -#include "network/impl/async_grpc_client.hpp" #include "network/impl/client_factory.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - /** - * Class which provides implementation of transport for consensus based on - * grpc - */ - class NetworkImpl : public YacNetwork, public proto::Yac::Service { - public: - using Service = proto::Yac; - using ClientFactory = iroha::network::ClientFactory; - - explicit NetworkImpl( - std::shared_ptr> - async_call, - std::unique_ptr> client_factory, - logger::LoggerPtr log); - - void subscribe( - std::shared_ptr handler) override; - - void sendState(const shared_model::interface::Peer &to, - const std::vector &state) override; - - /** - * Receive votes from another peer; - * Naming is confusing, because this is rpc call that - * perform on another machine; - */ - grpc::Status SendState( - ::grpc::ServerContext *context, - const ::iroha::consensus::yac::proto::State *request, - ::google::protobuf::Empty *response) override; - - void stop() override; - - private: - /** - * Subscriber of network messages - */ - std::weak_ptr handler_; - - /** - * Rpc call to provide an ability to perform call grpc endpoints - */ - std::shared_ptr> - async_call_; - - /** - * Yac stub creator - */ - std::unique_ptr client_factory_; - - std::mutex stop_mutex_; - bool stop_requested_{false}; - - logger::LoggerPtr log_; - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + /** + * Class which provides implementation of client-side transport for + * consensus based on grpc + */ + class NetworkImpl : public 
YacNetwork { + public: + using Service = proto::Yac; + using ClientFactory = iroha::network::ClientFactory; + + NetworkImpl(std::unique_ptr> client_factory, + logger::LoggerPtr log); + + void sendState(const shared_model::interface::Peer &to, + const std::vector &state) override; + + void stop() override; + + private: + /** + * Yac stub creator + */ + std::unique_ptr client_factory_; + + std::mutex stop_mutex_; + bool stop_requested_{false}; + + logger::LoggerPtr log_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_NETWORK_IMPL_HPP diff --git a/irohad/consensus/yac/transport/yac_network_interface.hpp b/irohad/consensus/yac/transport/yac_network_interface.hpp index af926d69ed8..a5bb9208e02 100644 --- a/irohad/consensus/yac/transport/yac_network_interface.hpp +++ b/irohad/consensus/yac/transport/yac_network_interface.hpp @@ -7,54 +7,47 @@ #define IROHA_YAC_NETWORK_INTERFACE_HPP #include +#include #include -namespace shared_model { - namespace interface { - class Peer; - } // namespace interface -} // namespace shared_model - -namespace iroha { - namespace consensus { - namespace yac { - - struct VoteMessage; - - class YacNetworkNotifications { - public: - /** - * Callback on receiving collection of votes - * @param state - provided message - */ - virtual void onState(std::vector state) = 0; - - virtual ~YacNetworkNotifications() = default; - }; - - class YacNetwork { - public: - virtual void subscribe( - std::shared_ptr handler) = 0; - - /** - * Directly share collection of votes - * @param to - peer recipient - * @param state - message for sending - */ - virtual void sendState(const shared_model::interface::Peer &to, - const std::vector &state) = 0; - - /// Prevent any new outgoing network activity. Be passive. 
- virtual void stop() = 0; - - /** - * Virtual destructor required for inheritance - */ - virtual ~YacNetwork() = default; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +#include "consensus/yac/storage/storage_result.hpp" + +namespace shared_model::interface { + class Peer; +} // namespace shared_model::interface + +namespace iroha::consensus::yac { + struct VoteMessage; + + class YacNetworkNotifications { + public: + /** + * Callback on receiving collection of votes + * @param state - provided message + */ + virtual std::optional onState(std::vector state) = 0; + + virtual ~YacNetworkNotifications() = default; + }; + + class YacNetwork { + public: + /** + * Directly share collection of votes + * @param to - peer recipient + * @param state - message for sending + */ + virtual void sendState(const shared_model::interface::Peer &to, + const std::vector &state) = 0; + + /// Prevent any new outgoing network activity. Be passive. + virtual void stop() = 0; + + /** + * Virtual destructor required for inheritance + */ + virtual ~YacNetwork() = default; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_NETWORK_INTERFACE_HPP diff --git a/irohad/consensus/yac/transport/yac_pb_converters.hpp b/irohad/consensus/yac/transport/yac_pb_converters.hpp index dd87dcbc4ca..79cf15352d8 100644 --- a/irohad/consensus/yac/transport/yac_pb_converters.hpp +++ b/irohad/consensus/yac/transport/yac_pb_converters.hpp @@ -14,146 +14,138 @@ #include "validators/field_validator.hpp" #include "yac.pb.h" -namespace iroha { - namespace consensus { - namespace yac { - class PbConverters { - private: - static inline proto::Vote serializeRoundAndHashes( - const VoteMessage &vote) { - proto::Vote pb_vote; - - auto hash = pb_vote.mutable_hash(); - auto hash_round = hash->mutable_vote_round(); - hash_round->set_block_round(vote.hash.vote_round.block_round); - hash_round->set_reject_round(vote.hash.vote_round.reject_round); - auto hash_vote_hashes = 
hash->mutable_vote_hashes(); - hash_vote_hashes->set_proposal(vote.hash.vote_hashes.proposal_hash); - hash_vote_hashes->set_block(vote.hash.vote_hashes.block_hash); - - return pb_vote; - } - - static inline VoteMessage deserealizeRoundAndHashes( - const proto::Vote &pb_vote) { - VoteMessage vote; - - vote.hash.vote_round = - Round{pb_vote.hash().vote_round().block_round(), - pb_vote.hash().vote_round().reject_round()}; - vote.hash.vote_hashes = - YacHash::VoteHashes{pb_vote.hash().vote_hashes().proposal(), - pb_vote.hash().vote_hashes().block()}; - - return vote; - } - - public: - static proto::Vote serializeVotePayload(const VoteMessage &vote) { - auto pb_vote = serializeRoundAndHashes(vote); - - if (vote.hash.block_signature) { - auto block_signature = - pb_vote.mutable_hash()->mutable_block_signature(); - auto signature = hexstringToBytestringResult( - vote.hash.block_signature->signedData()); - auto public_key = hexstringToBytestringResult( - vote.hash.block_signature->publicKey()); - block_signature->set_signature(std::move(signature).assumeValue()); - block_signature->set_pubkey(std::move(public_key).assumeValue()); - } - - return pb_vote; - } - - static proto::Vote serializeVote(const VoteMessage &vote) { - auto pb_vote = serializeRoundAndHashes(vote); - - if (vote.hash.block_signature) { - auto block_signature = - pb_vote.mutable_hash()->mutable_block_signature(); - auto signature = hexstringToBytestringResult( - vote.hash.block_signature->signedData()); - auto public_key = hexstringToBytestringResult( - vote.hash.block_signature->publicKey()); - block_signature->set_signature(std::move(signature).assumeValue()); - block_signature->set_pubkey(std::move(public_key).assumeValue()); - } - - auto vote_signature = pb_vote.mutable_signature(); - auto signature = - hexstringToBytestringResult(vote.signature->signedData()); - auto public_key = - hexstringToBytestringResult(vote.signature->publicKey()); - 
vote_signature->set_signature(std::move(signature).assumeValue()); - vote_signature->set_pubkey(std::move(public_key).assumeValue()); - - return pb_vote; - } +namespace iroha::consensus::yac { + class PbConverters { + private: + static inline proto::Vote serializeRoundAndHashes(const VoteMessage &vote) { + proto::Vote pb_vote; + + auto hash = pb_vote.mutable_hash(); + auto hash_round = hash->mutable_vote_round(); + hash_round->set_block_round(vote.hash.vote_round.block_round); + hash_round->set_reject_round(vote.hash.vote_round.reject_round); + auto hash_vote_hashes = hash->mutable_vote_hashes(); + hash_vote_hashes->set_proposal(vote.hash.vote_hashes.proposal_hash); + hash_vote_hashes->set_block(vote.hash.vote_hashes.block_hash); + + return pb_vote; + } + + static inline VoteMessage deserealizeRoundAndHashes( + const proto::Vote &pb_vote) { + VoteMessage vote; + + vote.hash.vote_round = Round{pb_vote.hash().vote_round().block_round(), + pb_vote.hash().vote_round().reject_round()}; + vote.hash.vote_hashes = + YacHash::VoteHashes{pb_vote.hash().vote_hashes().proposal(), + pb_vote.hash().vote_hashes().block()}; + + return vote; + } + + public: + static proto::Vote serializeVotePayload(const VoteMessage &vote) { + auto pb_vote = serializeRoundAndHashes(vote); + + if (vote.hash.block_signature) { + auto block_signature = + pb_vote.mutable_hash()->mutable_block_signature(); + auto signature = hexstringToBytestringResult( + vote.hash.block_signature->signedData()); + auto public_key = + hexstringToBytestringResult(vote.hash.block_signature->publicKey()); + block_signature->set_signature(std::move(signature).assumeValue()); + block_signature->set_pubkey(std::move(public_key).assumeValue()); + } + + return pb_vote; + } + + static proto::Vote serializeVote(const VoteMessage &vote) { + auto pb_vote = serializeRoundAndHashes(vote); + + if (vote.hash.block_signature) { + auto block_signature = + pb_vote.mutable_hash()->mutable_block_signature(); + auto signature = 
hexstringToBytestringResult( + vote.hash.block_signature->signedData()); + auto public_key = + hexstringToBytestringResult(vote.hash.block_signature->publicKey()); + block_signature->set_signature(std::move(signature).assumeValue()); + block_signature->set_pubkey(std::move(public_key).assumeValue()); + } + + auto vote_signature = pb_vote.mutable_signature(); + auto signature = + hexstringToBytestringResult(vote.signature->signedData()); + auto public_key = + hexstringToBytestringResult(vote.signature->publicKey()); + vote_signature->set_signature(std::move(signature).assumeValue()); + vote_signature->set_pubkey(std::move(public_key).assumeValue()); + + return pb_vote; + } + + static boost::optional deserializeVote( + const proto::Vote &pb_vote, logger::LoggerPtr log) { + // TODO IR-428 igor-egorov refactor PbConverters - do the class + // instantiable + static const uint64_t kMaxBatchSize{0}; + // This is a workaround for the following ProtoCommonObjectsFactory. + // We able to do this, because we don't have batches in consensus. 
+ static shared_model::proto::ProtoCommonObjectsFactory< + shared_model::validation::FieldValidator> + factory{std::make_shared( + kMaxBatchSize)}; + + auto vote = deserealizeRoundAndHashes(pb_vote); + + auto deserialize = [&](auto &pubkey, auto &signature, const auto &msg) { + auto pubkey_hex = bytestringToHexstring(pubkey); + auto signature_hex = bytestringToHexstring(signature); + using shared_model::interface::types::PublicKeyHexStringView; + using shared_model::interface::types::SignedHexStringView; + return factory + .createSignature(PublicKeyHexStringView{pubkey_hex}, + SignedHexStringView{signature_hex}) + .match( + [&](auto &&sig) + -> boost::optional< + std::unique_ptr> { + return std::move(sig).value; + }, + [&](const auto &reason) + -> boost::optional< + std::unique_ptr> { + log->error(msg, reason.error); + return boost::none; + }); + }; - static boost::optional deserializeVote( - const proto::Vote &pb_vote, logger::LoggerPtr log) { - // TODO IR-428 igor-egorov refactor PbConverters - do the class - // instantiable - static const uint64_t kMaxBatchSize{0}; - // This is a workaround for the following ProtoCommonObjectsFactory. - // We able to do this, because we don't have batches in consensus. 
- static shared_model::proto::ProtoCommonObjectsFactory< - shared_model::validation::FieldValidator> - factory{ - std::make_shared( - kMaxBatchSize)}; - - auto vote = deserealizeRoundAndHashes(pb_vote); - - auto deserialize = [&](auto &pubkey, - auto &signature, - const auto &msg) { - auto pubkey_hex = bytestringToHexstring(pubkey); - auto signature_hex = bytestringToHexstring(signature); - using shared_model::interface::types::PublicKeyHexStringView; - using shared_model::interface::types::SignedHexStringView; - return factory - .createSignature(PublicKeyHexStringView{pubkey_hex}, - SignedHexStringView{signature_hex}) - .match( - [&](auto &&sig) -> boost::optional> { - return std::move(sig).value; - }, - [&](const auto &reason) - -> boost::optional> { - log->error(msg, reason.error); - return boost::none; - }); - }; - - if (pb_vote.hash().has_block_signature()) { - if (auto block_signature = - deserialize(pb_vote.hash().block_signature().pubkey(), - pb_vote.hash().block_signature().signature(), - "Cannot build vote hash block signature: {}")) { - vote.hash.block_signature = *std::move(block_signature); - } else { - return boost::none; - } - } - - if (auto vote_signature = - deserialize(pb_vote.signature().pubkey(), - pb_vote.signature().signature(), - "Cannot build vote signature: {}")) { - vote.signature = *std::move(vote_signature); - } else { - return boost::none; - } - - return vote; + if (pb_vote.hash().has_block_signature()) { + if (auto block_signature = + deserialize(pb_vote.hash().block_signature().pubkey(), + pb_vote.hash().block_signature().signature(), + "Cannot build vote hash block signature: {}")) { + vote.hash.block_signature = *std::move(block_signature); + } else { + return boost::none; } - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + } + + if (auto vote_signature = + deserialize(pb_vote.signature().pubkey(), + pb_vote.signature().signature(), + "Cannot build vote signature: {}")) { + vote.signature = 
*std::move(vote_signature); + } else { + return boost::none; + } + + return vote; + } + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_PB_CONVERTERS_HPP diff --git a/irohad/consensus/yac/vote_message.hpp b/irohad/consensus/yac/vote_message.hpp index fbc3af70bd6..0624b79c8a4 100644 --- a/irohad/consensus/yac/vote_message.hpp +++ b/irohad/consensus/yac/vote_message.hpp @@ -8,40 +8,50 @@ #include +#include #include "consensus/yac/yac_hash_provider.hpp" // for YacHash #include "interfaces/common_objects/signature.hpp" #include "utils/string_builder.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - /** - * VoteMessage represents voting for some block; - */ - struct VoteMessage { - YacHash hash; - std::shared_ptr signature; - - bool operator==(const VoteMessage &rhs) const { - return hash == rhs.hash and *signature == *rhs.signature; - } - - bool operator!=(const VoteMessage &rhs) const { - return not(*this == rhs); - } - - std::string toString() const { - return shared_model::detail::PrettyStringBuilder() - .init("VoteMessage") - .appendNamed("yac hash", hash) - .appendNamed("signature", signature) - .finalize(); - } - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + /** + * VoteMessage represents voting for some block; + */ + struct VoteMessage { + YacHash hash; + std::shared_ptr signature; + + bool operator==(const VoteMessage &rhs) const { + return hash == rhs.hash and *signature == *rhs.signature; + } + + bool operator!=(const VoteMessage &rhs) const { + return not(*this == rhs); + } + + std::string toString() const { + return shared_model::detail::PrettyStringBuilder() + .init("VoteMessage") + .appendNamed("yac hash", hash) + .appendNamed("signature", signature) + .finalize(); + } + }; +} // namespace iroha::consensus::yac + +namespace std { + template <> + struct hash { + std::size_t operator()(iroha::consensus::yac::VoteMessage const &m) const + noexcept { + 
std::size_t seed = 0; + boost::hash_combine(seed, m.signature->publicKey()); + boost::hash_combine(seed, m.hash.vote_round); + boost::hash_combine(seed, m.hash.vote_hashes.proposal_hash); + boost::hash_combine(seed, m.hash.vote_hashes.block_hash); + return seed; + } + }; +} // namespace std #endif // IROHA_VOTE_MESSAGE_HPP diff --git a/irohad/consensus/yac/yac.hpp b/irohad/consensus/yac/yac.hpp index 795cc7f71f0..4dba30716c3 100644 --- a/irohad/consensus/yac/yac.hpp +++ b/irohad/consensus/yac/yac.hpp @@ -9,131 +9,110 @@ #include "consensus/yac/transport/yac_network_interface.hpp" // for YacNetworkNotifications #include "consensus/yac/yac_gate.hpp" // for HashGate +#include #include -#include +#include -#include -#include #include "consensus/yac/cluster_order.hpp" // for ClusterOrdering #include "consensus/yac/outcome_messages.hpp" // because messages passed by value #include "consensus/yac/storage/yac_vote_storage.hpp" // for VoteStorage #include "logger/logger_fwd.hpp" -#include - -namespace iroha { - namespace consensus { - namespace yac { - - class YacCryptoProvider; - class Timer; - - class Yac : public HashGate, public YacNetworkNotifications { - public: - /** - * Method for creating Yac consensus object - * @param delay for timer in milliseconds - */ - static std::shared_ptr create( - YacVoteStorage vote_storage, - std::shared_ptr network, - std::shared_ptr crypto, - std::shared_ptr timer, - ClusterOrdering order, - Round round, - rxcpp::observe_on_one_worker worker, - logger::LoggerPtr log); - - Yac(YacVoteStorage vote_storage, - std::shared_ptr network, - std::shared_ptr crypto, - std::shared_ptr timer, - ClusterOrdering order, - Round round, - rxcpp::observe_on_one_worker worker, - logger::LoggerPtr log); - - ~Yac() override; - - // ------|Hash gate|------ - - void vote(YacHash hash, - ClusterOrdering order, - boost::optional alternative_order = - boost::none) override; - - rxcpp::observable onOutcome() override; - - // ------|Network notifications|------ 
- - void onState(std::vector state) override; - - void stop() override; - - private: - // ------|Private interface|------ - - /** - * Voting step is strategy of propagating vote - * until commit/reject message received - */ - void votingStep(VoteMessage vote); - - /** - * Erase temporary data of current round - */ - void closeRound(); - - /// Get cluster_order_ or alternative_order_ if present - ClusterOrdering &getCurrentOrder(); - - /** - * Find corresponding peer in the ledger from vote message - * @param vote message containing peer information - * @return peer if it is present in the ledger, boost::none otherwise - */ - boost::optional> - findPeer(const VoteMessage &vote); - - /// Remove votes from unknown peers from given vector. - void removeUnknownPeersVotes(std::vector &votes, - ClusterOrdering &order); - - // ------|Apply data|------ - /** - * @pre lock is locked - * @post lock is unlocked - */ - void applyState(const std::vector &state, - std::unique_lock &lock); - - // ------|Propagation|------ - void propagateState(const std::vector &msg); - void propagateStateDirectly(const shared_model::interface::Peer &to, - const std::vector &msg); - void tryPropagateBack(const std::vector &state); - - // ------|Logger|------ - logger::LoggerPtr log_; - - std::mutex mutex_; - - // ------|One round|------ - ClusterOrdering cluster_order_; - boost::optional alternative_order_; - Round round_; - - // ------|Fields|------ - rxcpp::observe_on_one_worker worker_; - rxcpp::composite_subscription notifier_lifetime_; - rxcpp::subjects::synchronize notifier_; - YacVoteStorage vote_storage_; - std::shared_ptr network_; - std::shared_ptr crypto_; - std::shared_ptr timer_; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + class YacCryptoProvider; + class Timer; + + class Yac : public HashGate, public YacNetworkNotifications { + public: + /** + * Method for creating Yac consensus object + * @param delay for timer in 
milliseconds + */ + static std::shared_ptr create( + YacVoteStorage vote_storage, + std::shared_ptr network, + std::shared_ptr crypto, + std::shared_ptr timer, + shared_model::interface::types::PeerList order, + Round round, + logger::LoggerPtr log); + + Yac(YacVoteStorage vote_storage, + std::shared_ptr network, + std::shared_ptr crypto, + std::shared_ptr timer, + shared_model::interface::types::PeerList order, + Round round, + logger::LoggerPtr log); + + // ------|Hash gate|------ + + void vote(YacHash hash, + ClusterOrdering order, + std::optional alternative_order = + std::nullopt) override; + + std::optional processRoundSwitch( + consensus::Round const &round, + shared_model::interface::types::PeerList const &peers) override; + + // ------|Network notifications|------ + + std::optional onState(std::vector state) override; + + void stop() override; + + private: + // ------|Private interface|------ + + /** + * Voting step is strategy of propagating vote + * until commit/reject message received + */ + void votingStep(VoteMessage vote, + ClusterOrdering order, + uint32_t attempt = 0); + + /// Get cluster_order_ or alternative_order_ if present + shared_model::interface::types::PeerList &getCurrentOrder(); + + /** + * Find corresponding peer in the ledger from vote message + * @param vote message containing peer information + * @return peer if it is present in the ledger, std::nullopt otherwise + */ + std::optional> findPeer( + const VoteMessage &vote); + + /// Remove votes from unknown peers from given vector. 
+ void removeUnknownPeersVotes( + std::vector &votes, + shared_model::interface::types::PeerList const &order); + + // ------|Apply data|------ + std::optional applyState(const std::vector &state); + + // ------|Propagation|------ + void propagateState(const std::vector &msg); + void propagateStateDirectly(const shared_model::interface::Peer &to, + const std::vector &msg); + void tryPropagateBack(const std::vector &state); + + // ------|Logger|------ + logger::LoggerPtr log_; + + // ------|One round|------ + shared_model::interface::types::PeerList cluster_order_; + std::optional alternative_order_; + Round round_; + + // ------|Fields|------ + YacVoteStorage vote_storage_; + std::shared_ptr network_; + std::shared_ptr crypto_; + std::shared_ptr timer_; + std::map> future_states_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_HPP diff --git a/irohad/consensus/yac/yac_crypto_provider.hpp b/irohad/consensus/yac/yac_crypto_provider.hpp index 0e2a97777c6..0c23e50d5f3 100644 --- a/irohad/consensus/yac/yac_crypto_provider.hpp +++ b/irohad/consensus/yac/yac_crypto_provider.hpp @@ -8,33 +8,27 @@ #include "consensus/yac/yac_hash_provider.hpp" // for YacHash (passed by copy) -namespace iroha { - namespace consensus { - namespace yac { - - struct VoteMessage; - - class YacCryptoProvider { - public: - /** - * Verify signatory of message - * @param msg - for verification - * @return true if signature correct - */ - virtual bool verify(const std::vector &msg) = 0; - - /** - * Generate vote for provided hash; - * @param hash - hash for signing - * @return vote - */ - virtual VoteMessage getVote(YacHash hash) = 0; - - virtual ~YacCryptoProvider() = default; - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + struct VoteMessage; + + class YacCryptoProvider { + public: + /** + * Verify signatory of message + * @param msg - for verification + * @return true if signature correct + */ + virtual bool 
verify(const std::vector &msg) = 0; + + /** + * Generate vote for provided hash; + * @param hash - hash for signing + * @return vote + */ + virtual VoteMessage getVote(YacHash hash) = 0; + + virtual ~YacCryptoProvider() = default; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_CRYPTO_PROVIDER_HPP diff --git a/irohad/consensus/yac/yac_gate.hpp b/irohad/consensus/yac/yac_gate.hpp index bc15b592e5a..8bbc13754c3 100644 --- a/irohad/consensus/yac/yac_gate.hpp +++ b/irohad/consensus/yac/yac_gate.hpp @@ -6,48 +6,55 @@ #ifndef IROHA_YAC_GATE_HPP #define IROHA_YAC_GATE_HPP -#include +#include + #include "consensus/yac/cluster_order.hpp" #include "consensus/yac/storage/storage_result.hpp" #include "network/consensus_gate.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - class YacHash; - class ClusterOrdering; - - class YacGate : public network::ConsensusGate {}; - - /** - * Provide gate for ya consensus - */ - class HashGate { - public: - /** - * Proposal new hash in network - * @param hash - hash for voting - * @param order - peer ordering for round in hash - * @param alternative_order - peer order - */ - virtual void vote(YacHash hash, - ClusterOrdering order, - boost::optional alternative_order = - boost::none) = 0; - - /** - * Observable with consensus outcomes - commits and rejects - in network - * @return observable for subscription - */ - virtual rxcpp::observable onOutcome() = 0; - - /// Prevent any new outgoing network activity. Be passive. 
- virtual void stop() = 0; - - virtual ~HashGate() = default; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus { + struct Round; +} + +namespace iroha::consensus::yac { + class YacHash; + class ClusterOrdering; + + class YacGate : public network::ConsensusGate {}; + + /** + * Provide gate for ya consensus + */ + class HashGate { + public: + /** + * Proposal new hash in network + * @param hash - hash for voting + * @param order - peer ordering for round in hash + * @param alternative_order - peer order + */ + virtual void vote( + YacHash hash, + ClusterOrdering order, + std::optional alternative_order = std::nullopt) = 0; + + /** + * Update current state with the new round and peer list, possibly pruning + * the old state. Process states from future if available, and return the + * result + * @param round - new round + * @param peers - new peer list + * @return answer if storage already contains required votes + */ + virtual std::optional processRoundSwitch( + consensus::Round const &round, + shared_model::interface::types::PeerList const &peers) = 0; + + /// Prevent any new outgoing network activity. Be passive. 
+ virtual void stop() = 0; + + virtual ~HashGate() = default; + }; +} // namespace iroha::consensus::yac + #endif // IROHA_YAC_GATE_HPP diff --git a/irohad/consensus/yac/yac_hash_provider.hpp b/irohad/consensus/yac/yac_hash_provider.hpp index 02c83439e9b..006e127bc5f 100644 --- a/irohad/consensus/yac/yac_hash_provider.hpp +++ b/irohad/consensus/yac/yac_hash_provider.hpp @@ -16,103 +16,96 @@ #include "simulator/block_creator_common.hpp" #include "utils/string_builder.hpp" -namespace shared_model { - namespace interface { - class Signature; - class Block; - } // namespace interface -} // namespace shared_model - -namespace iroha { - namespace consensus { - namespace yac { - - class YacHash { - public: - // TODO: 2019-02-08 @muratovv IR-288 refactor YacHash: default ctor, - // block signature param, code in the header. - YacHash(Round round, ProposalHash proposal, BlockHash block) - : vote_round{round}, - vote_hashes{std::move(proposal), std::move(block)} {} - - YacHash() = default; - - /** - * Round, in which peer voted - */ - Round vote_round; - - /** - * Contains hashes of proposal and block, for which peer voted - */ - struct VoteHashes { - /** - * Hash computed from proposal - */ - ProposalHash proposal_hash; - - /** - * Hash computed from block; - */ - BlockHash block_hash; - - std::string toString() const { - return shared_model::detail::PrettyStringBuilder() - .init("VoteHashes") - .appendNamed("proposal", proposal_hash) - .appendNamed("block", block_hash) - .finalize(); - } - }; - VoteHashes vote_hashes; - - /** - * Peer signature of block - */ - std::shared_ptr block_signature; - - bool operator==(const YacHash &obj) const { - return vote_round == obj.vote_round - and vote_hashes.proposal_hash == obj.vote_hashes.proposal_hash - and vote_hashes.block_hash == obj.vote_hashes.block_hash; - }; - - bool operator!=(const YacHash &obj) const { - return not(*this == obj); - }; - - std::string toString() const { - return shared_model::detail::PrettyStringBuilder() - 
.init("YacHash") - .appendNamed("round", vote_round) - .appendNamed("hashes", vote_hashes) - .finalize(); - } - }; +namespace shared_model::interface { + class Signature; + class Block; +} // namespace shared_model::interface + +namespace iroha::consensus::yac { + class YacHash { + public: + // TODO: 2019-02-08 @muratovv IR-288 refactor YacHash: default ctor, + // block signature param, code in the header. + YacHash(Round round, ProposalHash proposal, BlockHash block) + : vote_round{round}, + vote_hashes{std::move(proposal), std::move(block)} {} + + YacHash() = default; + + /** + * Round, in which peer voted + */ + Round vote_round; + + /** + * Contains hashes of proposal and block, for which peer voted + */ + struct VoteHashes { + /** + * Hash computed from proposal + */ + ProposalHash proposal_hash; /** - * Provide methods related to hash operations in ya consensus + * Hash computed from block; */ - class YacHashProvider { - public: - /** - * Make hash from block creator event - */ - virtual YacHash makeHash( - const simulator::BlockCreatorEvent &event) const = 0; - - /** - * Convert YacHash to model hash - * @param hash - for converting - * @return HashType of model hash - */ - virtual shared_model::interface::types::HashType toModelHash( - const YacHash &hash) const = 0; - - virtual ~YacHashProvider() = default; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + BlockHash block_hash; + + std::string toString() const { + return shared_model::detail::PrettyStringBuilder() + .init("VoteHashes") + .appendNamed("proposal", proposal_hash) + .appendNamed("block", block_hash) + .finalize(); + } + }; + VoteHashes vote_hashes; + + /** + * Peer signature of block + */ + std::shared_ptr block_signature; + + bool operator==(const YacHash &obj) const { + return vote_round == obj.vote_round + and vote_hashes.proposal_hash == obj.vote_hashes.proposal_hash + and vote_hashes.block_hash == obj.vote_hashes.block_hash; + }; + + bool operator!=(const 
YacHash &obj) const { + return not(*this == obj); + }; + + std::string toString() const { + return shared_model::detail::PrettyStringBuilder() + .init("YacHash") + .appendNamed("round", vote_round) + .appendNamed("hashes", vote_hashes) + .finalize(); + } + }; + + /** + * Provide methods related to hash operations in ya consensus + */ + class YacHashProvider { + public: + /** + * Make hash from block creator event + */ + virtual YacHash makeHash( + const simulator::BlockCreatorEvent &event) const = 0; + + /** + * Convert YacHash to model hash + * @param hash - for converting + * @return HashType of model hash + */ + virtual shared_model::interface::types::HashType toModelHash( + const YacHash &hash) const = 0; + + virtual ~YacHashProvider() = default; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_HASH_PROVIDER_HPP diff --git a/irohad/consensus/yac/yac_peer_orderer.hpp b/irohad/consensus/yac/yac_peer_orderer.hpp index 10626042bee..45ab5743f6d 100644 --- a/irohad/consensus/yac/yac_peer_orderer.hpp +++ b/irohad/consensus/yac/yac_peer_orderer.hpp @@ -6,37 +6,31 @@ #ifndef IROHA_YAC_PEER_ORDERER_HPP #define IROHA_YAC_PEER_ORDERER_HPP -#include +#include #include "consensus/yac/cluster_order.hpp" -namespace iroha { - namespace consensus { - namespace yac { - - class YacHash; - - /** - * Interface responsible for creating order for yac consensus - */ - class YacPeerOrderer { - public: - /** - * Provide order of peers based on hash and initial order of peers - * @param hash - hash-object that used as seed of ordering shuffle - * @param peers - an ordered list of peers - * @return shuffled cluster order - */ - virtual boost::optional getOrdering( - const YacHash &hash, - std::vector> const - &peers) = 0; - - virtual ~YacPeerOrderer() = default; - }; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + class YacHash; + + /** + * Interface responsible for creating order for yac consensus + */ + class 
YacPeerOrderer { + public: + /** + * Provide order of peers based on hash and initial order of peers + * @param hash - hash-object that used as seed of ordering shuffle + * @param peers - an ordered list of peers + * @return shuffled cluster order + */ + virtual std::optional getOrdering( + const YacHash &hash, + std::vector> const + &peers) = 0; + + virtual ~YacPeerOrderer() = default; + }; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_PEER_ORDERER_HPP diff --git a/irohad/consensus/yac/yac_types.hpp b/irohad/consensus/yac/yac_types.hpp index becf5f224da..15251077a95 100644 --- a/irohad/consensus/yac/yac_types.hpp +++ b/irohad/consensus/yac/yac_types.hpp @@ -8,15 +8,9 @@ #include -namespace iroha { - namespace consensus { - namespace yac { - - /// Type for number of peers in round. - using PeersNumberType = size_t; - - } // namespace yac - } // namespace consensus -} // namespace iroha +namespace iroha::consensus::yac { + /// Type for number of peers in round. + using PeersNumberType = size_t; +} // namespace iroha::consensus::yac #endif // IROHA_YAC_TYPES_HPP diff --git a/irohad/main/CMakeLists.txt b/irohad/main/CMakeLists.txt index 1103747a0ed..25eaaf9ab0e 100644 --- a/irohad/main/CMakeLists.txt +++ b/irohad/main/CMakeLists.txt @@ -24,6 +24,14 @@ target_link_libraries(raw_block_loader logger ) +add_library(rdb_connection_init impl/rocksdb_connection_init.cpp) +target_link_libraries(rdb_connection_init + RocksDB::rocksdb + failover_callback + irohad_version + logger + ) + add_library(pg_connection_init impl/pg_connection_init.cpp) target_link_libraries(pg_connection_init SOCI::postgresql @@ -35,10 +43,13 @@ target_link_libraries(pg_connection_init logger ) -add_library(pending_txs_storage_init impl/pending_transaction_storage_init.cpp) -target_link_libraries(pending_txs_storage_init - pending_txs_storage - ) +add_library(async_subscription + impl/subscription.cpp + impl/async_dispatcher.cpp) + +add_library(sync_subscription + impl/subscription.cpp + 
impl/sync_dispatcher.cpp) add_library(application application.cpp @@ -79,10 +90,12 @@ target_link_libraries(application block_loader_service mst_processor torii_service - pending_txs_storage_init + pending_txs_storage common pg_connection_init + rdb_connection_init generator + async_subscription ) add_executable(irohad irohad.cpp) @@ -101,6 +114,8 @@ target_link_libraries(irohad logger_manager irohad_version pg_connection_init + rdb_connection_init + maintenance ) add_library(iroha_conf_loader iroha_conf_loader.cpp) diff --git a/irohad/main/application.cpp b/irohad/main/application.cpp index b35a7bd4120..2e2381064a9 100644 --- a/irohad/main/application.cpp +++ b/irohad/main/application.cpp @@ -5,11 +5,11 @@ #include "main/application.hpp" +#include #include -#include -#include #include "ametsuchi/impl/pool_wrapper.hpp" +#include "ametsuchi/impl/rocksdb_storage_impl.hpp" #include "ametsuchi/impl/storage_impl.hpp" #include "ametsuchi/impl/tx_presence_cache_impl.hpp" #include "ametsuchi/impl/wsv_restorer_impl.hpp" @@ -22,8 +22,10 @@ #include "backend/protobuf/proto_tx_status_factory.hpp" #include "common/bind.hpp" #include "common/files.hpp" +#include "common/result_try.hpp" #include "consensus/yac/consensus_outcome_type.hpp" #include "consensus/yac/consistency_model.hpp" +#include "consensus/yac/supermajority_checker.hpp" #include "cryptography/crypto_provider/crypto_model_signer.hpp" #include "cryptography/default_hash_provider.hpp" #include "generator/generator.hpp" @@ -33,10 +35,12 @@ #include "logger/logger.hpp" #include "logger/logger_manager.hpp" #include "main/impl/consensus_init.hpp" -#include "main/impl/pending_transaction_storage_init.hpp" +#include "main/impl/on_demand_ordering_init.hpp" #include "main/impl/pg_connection_init.hpp" +#include "main/impl/rocksdb_connection_init.hpp" #include "main/impl/storage_init.hpp" #include "main/server_runner.hpp" +#include "main/subscription.hpp" #include "multi_sig_transactions/gossip_propagation_strategy.hpp" 
#include "multi_sig_transactions/mst_processor_impl.hpp" #include "multi_sig_transactions/mst_propagation_strategy_stub.hpp" @@ -53,15 +57,13 @@ #include "network/impl/peer_tls_certificates_provider_root.hpp" #include "network/impl/peer_tls_certificates_provider_wsv.hpp" #include "network/impl/tls_credentials.hpp" -#include "ordering/impl/kick_out_proposal_creation_strategy.hpp" #include "ordering/impl/on_demand_common.hpp" #include "ordering/impl/on_demand_ordering_gate.hpp" -#include "ordering/impl/unique_creation_proposal_strategy.hpp" +#include "pending_txs_storage/impl/pending_txs_storage_impl.hpp" #include "simulator/impl/simulator.hpp" #include "synchronizer/impl/synchronizer_impl.hpp" #include "torii/impl/command_service_impl.hpp" #include "torii/impl/command_service_transport_grpc.hpp" -#include "torii/impl/status_bus_impl.hpp" #include "torii/processor/query_processor_impl.hpp" #include "torii/processor/transaction_processor_impl.hpp" #include "torii/query_service.hpp" @@ -89,62 +91,46 @@ using namespace iroha::synchronizer; using namespace iroha::torii; using namespace iroha::consensus::yac; -using namespace std::chrono_literals; - using shared_model::interface::types::PublicKeyHexStringView; /// Consensus consistency model type. 
static constexpr iroha::consensus::yac::ConsistencyModel kConsensusConsistencyModel = iroha::consensus::yac::ConsistencyModel::kCft; +static constexpr uint32_t kStaleStreamMaxRoundsDefault = 2; +static constexpr uint32_t kMstExpirationTimeDefault = 1440; +static constexpr uint32_t kMaxRoundsDelayDefault = 3000; + /** * Configuring iroha daemon */ Irohad::Irohad( - const boost::optional &block_store_dir, + const IrohadConfig &config, std::unique_ptr pg_opt, + std::unique_ptr rdb_opt, const std::string &listen_ip, - size_t torii_port, - size_t internal_port, - size_t max_proposal_size, - std::chrono::milliseconds proposal_delay, - std::chrono::milliseconds vote_delay, - std::chrono::minutes mst_expiration_time, - const shared_model::crypto::Keypair &keypair, - std::chrono::milliseconds max_rounds_delay, - size_t stale_stream_max_rounds, - boost::optional - opt_alternative_peers, + const boost::optional &keypair, logger::LoggerManagerTreePtr logger_manager, StartupWsvDataPolicy startup_wsv_data_policy, - std::shared_ptr grpc_channel_params, + StartupWsvSynchronizationPolicy startup_wsv_sync_policy, + std::optional> + maybe_grpc_channel_params, const boost::optional &opt_mst_gossip_params, - const boost::optional &torii_tls_params, boost::optional inter_peer_tls_config) - : block_store_dir_(block_store_dir), + : config_(config), listen_ip_(listen_ip), - torii_port_(torii_port), - torii_tls_params_(torii_tls_params), - internal_port_(internal_port), - max_proposal_size_(max_proposal_size), - proposal_delay_(proposal_delay), - vote_delay_(vote_delay), - is_mst_supported_(opt_mst_gossip_params), - mst_expiration_time_(mst_expiration_time), - max_rounds_delay_(max_rounds_delay), - stale_stream_max_rounds_(stale_stream_max_rounds), - opt_alternative_peers_(std::move(opt_alternative_peers)), - grpc_channel_params_(std::move(grpc_channel_params)), + keypair_(keypair), + startup_wsv_sync_policy_(startup_wsv_sync_policy), + 
maybe_grpc_channel_params_(std::move(maybe_grpc_channel_params)), opt_mst_gossip_params_(opt_mst_gossip_params), inter_peer_tls_config_(std::move(inter_peer_tls_config)), - pending_txs_storage_init( - std::make_unique()), - keypair(keypair), pg_opt_(std::move(pg_opt)), - ordering_init(logger_manager->getLogger()), - yac_init(std::make_unique()), - consensus_gate_objects(consensus_gate_objects_lifetime), + rdb_opt_(std::move(rdb_opt)), + subscription_engine_(getSubscription()), + ordering_init(std::make_shared( + logger_manager->getLogger())), + yac_init(std::make_shared()), log_manager_(std::move(logger_manager)), log_(log_manager_->getLogger()) { log_->info("created"); @@ -157,7 +143,12 @@ Irohad::Irohad( #if defined(USE_BURROW) vm_caller_ = std::make_unique(); #endif - return initStorage(startup_wsv_data_policy); + return initStorage( + startup_wsv_data_policy, + config_.database_config + && config_.database_config->type == kDbTypeRocksdb + ? StorageType::kRocksDb + : StorageType::kPostgres); })) { log_->error("Storage initialization failed: {}", e.value()); } @@ -170,57 +161,60 @@ Irohad::~Irohad() { if (ordering_gate) { ordering_gate->stop(); } - consensus_gate_objects_lifetime.unsubscribe(); - consensus_gate_events_subscription.unsubscribe(); + subscription_engine_->dispose(); } /** * Initializing iroha daemon */ Irohad::RunResult Irohad::init() { - // clang-format off - return initSettings() - | [this]{ return initValidatorsConfigs();} - | [this]{ return initBatchParser();} - | [this]{ return initValidators();} - | [this]{ return initWsvRestorer(); // Recover WSV from the existing ledger - // to be sure it is consistent - } - | [this]{ return restoreWsv();} - | [this]{ return validateKeypair();} - | [this]{ return initTlsCredentials();} - | [this]{ return initPeerCertProvider();} - | [this]{ return initClientFactory();} - | [this]{ return initCryptoProvider();} - | [this]{ return initNetworkClient();} - | [this]{ return initFactories();} - | [this]{ return 
initPersistentCache();} - | [this]{ return initOrderingGate();} - | [this]{ return initSimulator();} - | [this]{ return initConsensusCache();} - | [this]{ return initBlockLoader();} - | [this]{ return initConsensusGate();} - | [this]{ return initSynchronizer();} - | [this]{ return initPeerCommunicationService();} - | [this]{ return initStatusBus();} - | [this]{ return initMstProcessor();} - | [this]{ return initPendingTxsStorageWithCache();} - + IROHA_EXPECTED_ERROR_CHECK(initSettings()); + IROHA_EXPECTED_ERROR_CHECK(initValidatorsConfigs()); + IROHA_EXPECTED_ERROR_CHECK(initBatchParser()); + IROHA_EXPECTED_ERROR_CHECK(initValidators()); + // Recover WSV from the existing ledger to be sure it is consistent + IROHA_EXPECTED_ERROR_CHECK(initWsvRestorer()); + IROHA_EXPECTED_ERROR_CHECK(restoreWsv()); + IROHA_EXPECTED_ERROR_CHECK(validateKeypair()); + IROHA_EXPECTED_ERROR_CHECK(initTlsCredentials()); + IROHA_EXPECTED_ERROR_CHECK(initPeerCertProvider()); + IROHA_EXPECTED_ERROR_CHECK(initClientFactory()); + IROHA_EXPECTED_ERROR_CHECK(initCryptoProvider()); + IROHA_EXPECTED_ERROR_CHECK(initNetworkClient()); + IROHA_EXPECTED_ERROR_CHECK(initFactories()); + IROHA_EXPECTED_ERROR_CHECK(initPersistentCache()); + IROHA_EXPECTED_ERROR_CHECK(initOrderingGate()); + IROHA_EXPECTED_ERROR_CHECK(initSimulator()); + IROHA_EXPECTED_ERROR_CHECK(initConsensusCache()); + IROHA_EXPECTED_ERROR_CHECK(initBlockLoader()); + IROHA_EXPECTED_ERROR_CHECK(initConsensusGate()); + IROHA_EXPECTED_ERROR_CHECK(initSynchronizer()); + IROHA_EXPECTED_ERROR_CHECK(initPeerCommunicationService()); + IROHA_EXPECTED_ERROR_CHECK(initStatusBus()); + IROHA_EXPECTED_ERROR_CHECK(initMstProcessor()); + IROHA_EXPECTED_ERROR_CHECK(initPendingTxsStorageWithCache()); // Torii - | [this]{ return initTransactionCommandService();} - | [this]{ return initQueryService();}; - // clang-format on + IROHA_EXPECTED_ERROR_CHECK(initTransactionCommandService()); + IROHA_EXPECTED_ERROR_CHECK(initQueryService()); + return {}; } 
Irohad::RunResult Irohad::dropStorage() { - return storage->dropBlockStorage() | [this] { return resetWsv(); }; + IROHA_EXPECTED_ERROR_CHECK(storage->dropBlockStorage()); + IROHA_EXPECTED_ERROR_CHECK(resetWsv()); + return {}; } Irohad::RunResult Irohad::resetWsv() { storage.reset(); + db_context_.reset(); log_->info("Recreating schema."); - return initStorage(StartupWsvDataPolicy::kDrop); + return initStorage( + StartupWsvDataPolicy::kDrop, + config_.database_config && config_.database_config->type == kDbTypeRocksdb + ? StorageType::kRocksDb + : StorageType::kPostgres); } /** @@ -232,12 +226,10 @@ Irohad::RunResult Irohad::initSettings() { return expected::makeError("Unable to create Settings"); } - return settingsQuery.get()->get() | [this](auto &&settings) -> RunResult { - this->settings_ = std::move(settings); - - log_->info("[Init] => settings"); - return {}; - }; + IROHA_EXPECTED_TRY_GET_VALUE(settings, settingsQuery.get()->get()); + settings_ = std::move(settings); + log_->info("[Init] => settings"); + return {}; } /** @@ -246,13 +238,13 @@ Irohad::RunResult Irohad::initSettings() { Irohad::RunResult Irohad::initValidatorsConfigs() { validators_config_ = std::make_shared( - max_proposal_size_); + config_.max_proposal_size); block_validators_config_ = std::make_shared( - max_proposal_size_, true); + config_.max_proposal_size, true); proposal_validators_config_ = std::make_shared( - max_proposal_size_, false, true); + config_.max_proposal_size, false, true); log_->info("[Init] => validators configs"); return {}; } @@ -261,71 +253,109 @@ Irohad::RunResult Irohad::initValidatorsConfigs() { * Initializing iroha daemon storage */ Irohad::RunResult Irohad::initStorage( - StartupWsvDataPolicy startup_wsv_data_policy) { - return PgConnectionInit::init(startup_wsv_data_policy, *pg_opt_, log_manager_) - | [this](auto &&pool_wrapper) -> RunResult { - pool_wrapper_ = std::move(pool_wrapper); - query_response_factory_ = - std::make_shared(); - - std::optional> - 
vm_caller_ref; - if (vm_caller_) { - vm_caller_ref = *vm_caller_.value(); - } + StartupWsvDataPolicy startup_wsv_data_policy, iroha::StorageType type) { + query_response_factory_ = + std::make_shared(); + + std::optional> + vm_caller_ref; + if (vm_caller_) { + vm_caller_ref = *vm_caller_.value(); + } - return ::iroha::initStorage(*pg_opt_, - pool_wrapper_, - pending_txs_storage_, - query_response_factory_, - block_store_dir_, - vm_caller_ref, - log_manager_->getChild("Storage")) - | [&](auto &&v) -> RunResult { + auto storage_creator = [&]() -> RunResult { + auto process_block = + [this](std::shared_ptr block) { + iroha::getSubscription()->notify(EventTypes::kOnBlock, block); + if (ordering_init and tx_processor and pending_txs_storage_ + and mst_storage) { + ordering_init->processCommittedBlock(block); + tx_processor->processCommit(block); + for (auto const &completed_tx : block->transactions()) { + pending_txs_storage_->removeTransaction(completed_tx.hash()); + mst_storage->processFinalizedTransaction(completed_tx.hash()); + } + for (auto const &rejected_tx_hash : + block->rejected_transactions_hashes()) { + pending_txs_storage_->removeTransaction(rejected_tx_hash); + mst_storage->processFinalizedTransaction(rejected_tx_hash); + } + } + }; + + auto st = type == StorageType::kPostgres + ? ::iroha::initStorage(*pg_opt_, + pool_wrapper_, + pending_txs_storage_, + query_response_factory_, + config_.block_store_path, + vm_caller_ref, + process_block, + log_manager_->getChild("Storage")) + : type == StorageType::kRocksDb + ? 
::iroha::initStorage(db_context_, + pending_txs_storage_, + query_response_factory_, + config_.block_store_path, + vm_caller_ref, + process_block, + log_manager_->getChild("Storage")) + : iroha::expected::makeError("Unexpected storage type."); + + return st | [&](auto &&v) -> RunResult { storage = std::move(v); - using shared_model::crypto::Hash; - using shared_model::interface::Block; - - finalized_txs_ = - storage->on_commit() - .template lift([](rxcpp::subscriber dest) { - return rxcpp::make_subscriber>( - dest, [=](std::shared_ptr const &block) { - for (auto const &completed_tx : block->transactions()) { - dest.on_next(completed_tx.hash()); - } - for (auto const &rejected_tx_hash : - block->rejected_transactions_hashes()) { - dest.on_next(rejected_tx_hash); - } - }); - }) - .publish() - .ref_count(); - log_->info("[Init] => storage"); return {}; }; }; + + switch (type) { + case StorageType::kPostgres: { + IROHA_EXPECTED_TRY_GET_VALUE( + pool_wrapper, + PgConnectionInit::init( + startup_wsv_data_policy, *pg_opt_, log_manager_)); + pool_wrapper_ = std::move(pool_wrapper); + } break; + + case StorageType::kRocksDb: { + IROHA_EXPECTED_TRY_GET_VALUE( + rdb_port, + RdbConnectionInit::init( + startup_wsv_data_policy, *rdb_opt_, log_manager_)); + db_context_ = + std::make_shared(std::move(rdb_port)); + } break; + + default: + return iroha::expected::makeError( + "Unexpected storage type!"); + } + return storage_creator(); } Irohad::RunResult Irohad::restoreWsv() { - return wsv_restorer_->restoreWsv(*storage) | - [](const auto &ledger_state) -> RunResult { - assert(ledger_state); - if (ledger_state->ledger_peers.empty()) { - return iroha::expected::makeError( - "Have no peers in WSV after restoration!"); - } - return {}; - }; + IROHA_EXPECTED_TRY_GET_VALUE( + ledger_state, + wsv_restorer_->restoreWsv( + *storage, + startup_wsv_sync_policy_ + == StartupWsvSynchronizationPolicy::kWaitForNewBlocks)); + assert(ledger_state); + if (ledger_state->ledger_peers.empty()) { + 
return iroha::expected::makeError( + "Have no peers in WSV after restoration!"); + } + return {}; } Irohad::RunResult Irohad::validateKeypair() { + BOOST_ASSERT_MSG(keypair_.has_value(), "keypair must be specified somewhere"); + auto peers = storage->createPeerQuery() | [this](auto &&peer_query) { return peer_query->getLedgerPeerByPublicKey( - PublicKeyHexStringView{keypair.publicKey()}); + PublicKeyHexStringView{keypair_->publicKey()}); }; if (not peers) { log_->warn("There is no peer in the ledger with my public key!"); @@ -340,9 +370,10 @@ Irohad::RunResult Irohad::validateKeypair() { Irohad::RunResult Irohad::initTlsCredentials() { const auto &p2p_path = inter_peer_tls_config_ | [](const auto &p2p_config) { return p2p_config.my_tls_creds_path; }; - const auto &torii_path = torii_tls_params_ | [](const auto &torii_config) { - return boost::make_optional(torii_config.key_path); - }; + const auto &torii_path = + config_.torii_tls_params | [](const auto &torii_config) { + return boost::make_optional(torii_config.key_path); + }; auto load_tls_creds = [this](const auto &opt_path, const auto &description, @@ -423,9 +454,11 @@ Irohad::RunResult Irohad::initPeerCertProvider() { * Initializing channel pool. 
*/ Irohad::RunResult Irohad::initClientFactory() { + auto channel_factory = + std::make_unique(this->maybe_grpc_channel_params_); + auto channel_pool = std::make_unique(std::move(channel_factory)); inter_peer_client_factory_ = - std::make_unique(std::make_unique( - std::make_unique(this->grpc_channel_params_))); + std::make_unique(std::move(channel_pool)); return {}; } @@ -434,7 +467,7 @@ Irohad::RunResult Irohad::initClientFactory() { */ Irohad::RunResult Irohad::initCryptoProvider() { crypto_signer_ = - std::make_shared>(keypair); + std::make_shared>(*keypair_); log_->info("[Init] => crypto provider"); return {}; @@ -575,54 +608,23 @@ Irohad::RunResult Irohad::initOrderingGate() { return iroha::expected::makeError( "Failed to create block query"); } - // since delay is 2, it is required to get two more hashes from block store, - // in addition to top block - const size_t kNumBlocks = 3; - auto top_height = (*block_query)->getTopBlockHeight(); - decltype(top_height) block_hashes = - top_height > kNumBlocks ? 
kNumBlocks : top_height; - - auto hash_stub = shared_model::interface::types::HashType{ - std::string(shared_model::crypto::DefaultHashProvider::kHashLength, '0')}; - std::vector hashes{ - kNumBlocks - block_hashes, hash_stub}; - - for (decltype(top_height) i = top_height - block_hashes + 1; i <= top_height; - ++i) { - auto block_result = (*block_query)->getBlock(i); - - if (auto e = expected::resultToOptionalError(block_result)) { - return iroha::expected::makeError(std::move(e->message)); - } - - auto &block = - boost::get< - expected::Value>>( - block_result) - .value; - hashes.push_back(block->hash()); - } auto factory = std::make_unique>(validators_config_); - std::shared_ptr proposal_strategy = - std::make_shared(); - - ordering_gate = - ordering_init.initOrderingGate(max_proposal_size_, - proposal_delay_, - std::move(hashes), - transaction_factory, - batch_parser, - transaction_batch_factory_, - async_call_, - std::move(factory), - proposal_factory, - persistent_cache, - proposal_strategy, - log_manager_->getChild("Ordering"), - inter_peer_client_factory_); + ordering_gate = ordering_init->initOrderingGate( + config_.max_proposal_size, + std::chrono::milliseconds(config_.proposal_delay), + transaction_factory, + batch_parser, + transaction_batch_factory_, + std::move(factory), + proposal_factory, + persistent_cache, + log_manager_->getChild("Ordering"), + inter_peer_client_factory_, + std::chrono::milliseconds( + config_.proposal_creation_timeout.value_or(kMaxRoundsDelayDefault))); log_->info("[Init] => init ordering gate - [{}]", logger::boolRepr(bool(ordering_gate))); return {}; @@ -632,33 +634,29 @@ Irohad::RunResult Irohad::initOrderingGate() { * Initializing iroha verified proposal creator and block creator */ Irohad::RunResult Irohad::initSimulator() { - return storage->createCommandExecutor() | - [this](auto &&command_executor) -> RunResult { - auto block_factory = - std::make_unique( - // Block factory in simulator uses UnsignedBlockValidator because 
- // it is not required to check signatures of block here, as they - // will be checked when supermajority of peers will sign the block. - // It is also not required to validate signatures of transactions - // here because they are validated in the ordering gate, where they - // are received from the ordering service. - std::make_unique< - shared_model::validation::DefaultUnsignedBlockValidator>( - block_validators_config_), - std::make_unique()); - - simulator = std::make_shared( - std::move(command_executor), - ordering_gate, - stateful_validator, - storage, - crypto_signer_, - std::move(block_factory), - log_manager_->getChild("Simulator")->getLogger()); - - log_->info("[Init] => init simulator"); - return {}; - }; + IROHA_EXPECTED_TRY_GET_VALUE(command_executor, + storage->createCommandExecutor()); + auto block_factory = std::make_unique( + // Block factory in simulator uses UnsignedBlockValidator because + // it is not required to check signatures of block here, as they + // will be checked when supermajority of peers will sign the block. + // It is also not required to validate signatures of transactions + // here because they are validated in the ordering gate, where they + // are received from the ordering service. 
+ std::make_unique( + block_validators_config_), + std::make_unique()); + + simulator = std::make_shared( + std::move(command_executor), + stateful_validator, + storage, + crypto_signer_, + std::move(block_factory), + log_manager_->getChild("Simulator")->getLogger()); + + log_->info("[Init] => init simulator"); + return {}; } /** @@ -691,38 +689,23 @@ Irohad::RunResult Irohad::initBlockLoader() { * Initializing consensus gate */ Irohad::RunResult Irohad::initConsensusGate() { - auto block_query = storage->createBlockQuery(); - if (not block_query) { - return iroha::expected::makeError( - "Failed to create block query"); - } - auto block_var = - (*block_query)->getBlock((*block_query)->getTopBlockHeight()); - if (auto e = expected::resultToOptionalError(block_var)) { - return iroha::expected::makeError( - "Failed to get the top block: " + e->message); + auto initial_ledger_state = storage->getLedgerState(); + if (not initial_ledger_state) { + return expected::makeError("Failed to fetch ledger state!"); } - auto &block = - boost::get>(&block_var)->value; - consensus_gate = yac_init->initConsensusGate( - {block->height(), ordering::kFirstRejectRound}, - storage, - opt_alternative_peers_, - simulator, + {initial_ledger_state.value()->top_block_info.height + 1, + ordering::kFirstRejectRound}, + config_.initial_peers, + *initial_ledger_state, block_loader, - keypair, + *keypair_, consensus_result_cache_, - vote_delay_, - async_call_, + std::chrono::milliseconds(config_.vote_delay), kConsensusConsistencyModel, log_manager_->getChild("Consensus"), - max_rounds_delay_, inter_peer_client_factory_); - consensus_gate->onOutcome().subscribe( - consensus_gate_events_subscription, - consensus_gate_objects.get_subscriber()); log_->info("[Init] => consensus gate"); return {}; } @@ -731,59 +714,65 @@ Irohad::RunResult Irohad::initConsensusGate() { * Initializing synchronizer */ Irohad::RunResult Irohad::initSynchronizer() { - return storage->createCommandExecutor() | - [this](auto 
&&command_executor) -> RunResult { - synchronizer = std::make_shared( - std::move(command_executor), - consensus_gate, - chain_validator, - storage, - storage, - block_loader, - log_manager_->getChild("Synchronizer")->getLogger()); - - log_->info("[Init] => synchronizer"); - return {}; - }; -} - -/** - * Initializing peer communication service - */ -Irohad::RunResult Irohad::initPeerCommunicationService() { - pcs = std::make_shared( - ordering_gate, - synchronizer, - simulator, - log_manager_->getChild("PeerCommunicationService")->getLogger()); + IROHA_EXPECTED_TRY_GET_VALUE(command_executor, + storage->createCommandExecutor()); + synchronizer = std::make_shared( + std::move(command_executor), + chain_validator, + storage, + storage, + block_loader, + log_manager_->getChild("Synchronizer")->getLogger()); - pcs->onProposal().subscribe([this](const auto &) { - log_->info("~~~~~~~~~| PROPOSAL ^_^ |~~~~~~~~~ "); - }); + log_->info("[Init] => synchronizer"); + return {}; +} - pcs->onSynchronization().subscribe([this](const auto &event) { +namespace { + void printSynchronizationEvent( + logger::LoggerPtr log, synchronizer::SynchronizationEvent const &event) { using iroha::synchronizer::SynchronizationOutcomeType; switch (event.sync_outcome) { case SynchronizationOutcomeType::kCommit: - log_->info(R"(~~~~~~~~~| COMMIT =^._.^= |~~~~~~~~~ )"); + log->info(R"(~~~~~~~~~| COMMIT =^._.^= |~~~~~~~~~ )"); break; case SynchronizationOutcomeType::kReject: - log_->info(R"(~~~~~~~~~| REJECT \(*.*)/ |~~~~~~~~~ )"); + log->info(R"(~~~~~~~~~| REJECT \(*.*)/ |~~~~~~~~~ )"); break; case SynchronizationOutcomeType::kNothing: - log_->info(R"(~~~~~~~~~| EMPTY (-_-)zzz |~~~~~~~~~ )"); - break; - default: + log->info(R"(~~~~~~~~~| EMPTY (-_-)zzz |~~~~~~~~~ )"); break; } - }); + } +} // namespace + +/** + * Initializing peer communication service + */ +Irohad::RunResult Irohad::initPeerCommunicationService() { + pcs = std::make_shared( + ordering_gate, + 
log_manager_->getChild("PeerCommunicationService")->getLogger()); log_->info("[Init] => pcs"); return {}; } Irohad::RunResult Irohad::initStatusBus() { - status_bus_ = std::make_shared(); + struct StatusBusImpl final : public StatusBus { + StatusBusImpl(Irohad &irohad) : irohad_(irohad) {} + + void publish(StatusBus::Objects const &response) override { + iroha::getSubscription()->notify(EventTypes::kOnTransactionResponse, + StatusBus::Objects(response)); + if (irohad_.command_service) + irohad_.command_service->processTransactionResponse(response); + } + + private: + Irohad &irohad_; + }; + status_bus_ = std::make_shared(*this); log_->info("[Init] => Tx status bus"); return {}; } @@ -792,15 +781,14 @@ Irohad::RunResult Irohad::initMstProcessor() { auto mst_logger_manager = log_manager_->getChild("MultiSignatureTransactions"); auto mst_state_logger = mst_logger_manager->getChild("State")->getLogger(); - auto mst_completer = std::make_shared(mst_expiration_time_); - auto mst_storage = MstStorageStateImpl::create( + auto mst_completer = std::make_shared(std::chrono::minutes( + config_.mst_expiration_time.value_or(kMstExpirationTimeDefault))); + mst_storage = std::make_shared( mst_completer, - finalized_txs_, mst_state_logger, mst_logger_manager->getChild("Storage")->getLogger()); - pending_txs_storage_init->setFinalizedTxsSubscription(finalized_txs_); std::shared_ptr mst_propagation; - if (is_mst_supported_) { + if (config_.mst_support) { mst_transport = std::make_shared( async_call_, transaction_factory, @@ -808,7 +796,7 @@ Irohad::RunResult Irohad::initMstProcessor() { transaction_batch_factory_, persistent_cache, mst_completer, - PublicKeyHexStringView{keypair.publicKey()}, + PublicKeyHexStringView{keypair_->publicKey()}, std::move(mst_state_logger), mst_logger_manager->getChild("Transport")->getLogger(), std::make_uniquesubscribe(fair_mst_processor); - pending_txs_storage_init->setMstSubscriptions(*mst_processor); - log_->info("[Init] => MST processor"); return 
{}; } Irohad::RunResult Irohad::initPendingTxsStorage() { - pending_txs_storage_ = - pending_txs_storage_init->createPendingTransactionsStorage(); + pending_txs_storage_ = std::make_shared(); log_->info("[Init] => pending transactions storage"); return {}; } @@ -852,16 +837,49 @@ Irohad::RunResult Irohad::initTransactionCommandService() { auto status_factory = std::make_shared(); auto cs_cache = std::make_shared<::torii::CommandServiceImpl::CacheType>(); - auto tx_processor = std::make_shared( + tx_processor = std::make_shared( pcs, mst_processor, status_bus_, status_factory, - storage->on_commit(), command_service_log_manager->getChild("Processor")->getLogger()); + mst_processor->onStateUpdate().subscribe( + [tx_processor(utils::make_weak(tx_processor)), + pending_txs_storage(utils::make_weak(pending_txs_storage_))]( + std::shared_ptr const &state) { + auto maybe_tx_processor = tx_processor.lock(); + auto maybe_pending_txs_storage = pending_txs_storage.lock(); + if (maybe_tx_processor and maybe_pending_txs_storage) { + maybe_tx_processor->processStateUpdate(state); + maybe_pending_txs_storage->updatedBatchesHandler(state); + } + }); + mst_processor->onPreparedBatches().subscribe( + [tx_processor(utils::make_weak(tx_processor)), + pending_txs_storage(utils::make_weak(pending_txs_storage_))]( + std::shared_ptr const + &batch) { + auto maybe_tx_processor = tx_processor.lock(); + auto maybe_pending_txs_storage = pending_txs_storage.lock(); + if (maybe_tx_processor and maybe_pending_txs_storage) { + maybe_tx_processor->processPreparedBatch(batch); + maybe_pending_txs_storage->removeBatch(batch); + } + }); + mst_processor->onExpiredBatches().subscribe( + [tx_processor(utils::make_weak(tx_processor)), + pending_txs_storage(utils::make_weak(pending_txs_storage_))]( + std::shared_ptr const + &batch) { + auto maybe_tx_processor = tx_processor.lock(); + auto maybe_pending_txs_storage = pending_txs_storage.lock(); + if (maybe_tx_processor and maybe_pending_txs_storage) { + 
maybe_tx_processor->processExpiredBatch(batch); + maybe_pending_txs_storage->removeBatch(batch); + } + }); command_service = std::make_shared<::torii::CommandServiceImpl>( tx_processor, - storage, status_bus_, status_factory, cs_cache, @@ -875,10 +893,8 @@ Irohad::RunResult Irohad::initTransactionCommandService() { transaction_factory, batch_parser, transaction_batch_factory_, - consensus_gate_objects.get_observable().map([](const auto &) { - return ::torii::CommandServiceTransportGrpc::ConsensusGateEvent{}; - }), - stale_stream_max_rounds_, + config_.stale_stream_max_rounds.value_or( + kStaleStreamMaxRoundsDefault), command_service_log_manager->getChild("Transport")->getLogger()); log_->info("[Init] => command service"); @@ -916,109 +932,210 @@ Irohad::RunResult Irohad::initWsvRestorer() { wsv_restorer_ = std::make_shared( std::move(interface_validator), std::move(proto_validator), - chain_validator); + chain_validator, + log_manager_->getChild("WsvRestorer")->getLogger()); return {}; } +namespace { + struct ProcessGateObjectContext { + std::shared_ptr synchronizer; + std::shared_ptr ordering_init; + std::shared_ptr yac_init; + logger::LoggerPtr log; + std::shared_ptr subscription; + }; + + void processGateObject(ProcessGateObjectContext context, + consensus::GateObject const &object) { + context.subscription->notify( + EventTypes::kOnConsensusGateEvent, + ::torii::CommandServiceTransportGrpc::ConsensusGateEvent{}); + context.log->info("~~~~~~~~~| PROPOSAL ^_^ |~~~~~~~~~ "); + auto event = context.synchronizer->processOutcome(std::move(object)); + if (not event) { + return; + } + context.subscription->notify(EventTypes::kOnSynchronization, + SynchronizationEvent(*event)); + printSynchronizationEvent(context.log, *event); + auto round_switch = + context.ordering_init->processSynchronizationEvent(std::move(*event)); + if (auto maybe_object = context.yac_init->processRoundSwitch( + round_switch.next_round, round_switch.ledger_state)) { + auto round = [](auto &object) 
{ return object.round; }; + context.log->info("Ignoring object with {} because {} is newer", + std::visit(round, object), + std::visit(round, *maybe_object)); + return processGateObject(std::move(context), *maybe_object); + } + context.ordering_init->processRoundSwitch(round_switch); + } +} // namespace + /** * Run iroha daemon */ Irohad::RunResult Irohad::run() { - using iroha::expected::operator|; - using iroha::operator|; + ordering_init->subscribe([simulator(utils::make_weak(simulator)), + consensus_gate(utils::make_weak(consensus_gate)), + tx_processor(utils::make_weak(tx_processor)), + subscription(utils::make_weak(getSubscription()))]( + network::OrderingEvent const &event) { + auto maybe_simulator = simulator.lock(); + auto maybe_consensus_gate = consensus_gate.lock(); + auto maybe_tx_processor = tx_processor.lock(); + auto maybe_subscription = subscription.lock(); + if (maybe_simulator and maybe_consensus_gate and maybe_tx_processor + and maybe_subscription) { + maybe_subscription->notify(EventTypes::kOnProposal, event); + auto verified_proposal = maybe_simulator->processProposal(event); + maybe_subscription->notify(EventTypes::kOnVerifiedProposal, + verified_proposal); + maybe_tx_processor->processVerifiedProposalCreatorEvent( + verified_proposal); + auto block = maybe_simulator->processVerifiedProposal( + std::move(verified_proposal)); + maybe_consensus_gate->vote(std::move(block)); + } + }); + + yac_init->subscribe([synchronizer(utils::make_weak(synchronizer)), + ordering_init(utils::make_weak(ordering_init)), + yac_init(utils::make_weak(yac_init)), + log(utils::make_weak(log_)), + subscription(utils::make_weak(getSubscription()))]( + consensus::GateObject const &object) { + auto maybe_synchronizer = synchronizer.lock(); + auto maybe_ordering_init = ordering_init.lock(); + auto maybe_yac_init = yac_init.lock(); + auto maybe_log = log.lock(); + auto maybe_subscription = subscription.lock(); + if (maybe_synchronizer and maybe_ordering_init and 
maybe_yac_init + and maybe_log and maybe_subscription) { + processGateObject({std::move(maybe_synchronizer), + std::move(maybe_ordering_init), + std::move(maybe_yac_init), + std::move(maybe_log), + std::move(maybe_subscription)}, + object); + } + }); // Initializing torii server torii_server = std::make_unique( - listen_ip_ + ":" + std::to_string(torii_port_), + listen_ip_ + ":" + std::to_string(config_.torii_port), log_manager_->getChild("ToriiServerRunner")->getLogger(), false); // Initializing internal server internal_server = std::make_unique( - listen_ip_ + ":" + std::to_string(internal_port_), + listen_ip_ + ":" + std::to_string(config_.internal_port), log_manager_->getChild("InternalServerRunner")->getLogger(), false); - auto make_port_logger = [this](std::string server_name) { - return [this, server_name](auto port) -> RunResult { - log_->info("{} server bound on port {}", server_name, port); - return {}; - }; - }; - // Run torii server - auto run_result = torii_server->append(command_service_transport) - .append(query_service) - .run() - | make_port_logger("Torii"); + IROHA_EXPECTED_TRY_GET_VALUE(torii_port, + torii_server->append(command_service_transport) + .append(query_service) + .run()); + log_->info("Torii server bound on port {}", torii_port); // Run torii TLS server - torii_tls_creds_ | [&, this](const auto &tls_creds) { - run_result |= [&, this] { - torii_tls_server = std::make_unique( - listen_ip_ + ":" + std::to_string(torii_tls_params_->port), - log_manager_->getChild("ToriiTlsServerRunner")->getLogger(), - false, - tls_creds); - return (*torii_tls_server) - ->append(command_service_transport) - .append(query_service) - .run() - | make_port_logger("Torii TLS"); - }; - }; + if (torii_tls_creds_) { + torii_tls_server = std::make_unique( + listen_ip_ + ":" + std::to_string(config_.torii_tls_params->port), + log_manager_->getChild("ToriiTlsServerRunner")->getLogger(), + false, + *torii_tls_creds_); + IROHA_EXPECTED_TRY_GET_VALUE(torii_tls_port, + 
torii_tls_server.value() + ->append(command_service_transport) + .append(query_service) + .run()); + log_->info("Torii TLS server bound on port {}", torii_tls_port); + } // Run internal server - run_result |= [&, this] { - if (is_mst_supported_) { - internal_server->append( - std::static_pointer_cast(mst_transport)); - } - return internal_server->append(ordering_init.service) - .append(yac_init->getConsensusNetwork()) - .append(loader_init.service) - .run() - | make_port_logger("Internal"); - }; - - return run_result | [&]() -> RunResult { - log_->info("===> iroha initialized"); - // initiate first round - auto block_query = storage->createBlockQuery(); - if (not block_query) { - return expected::makeError("Failed to create block query"); - } - auto block_var = - (*block_query)->getBlock((*block_query)->getTopBlockHeight()); - if (auto e = expected::resultToOptionalError(block_var)) { - return expected::makeError("Failed to get the top block: " + e->message); - } - - auto &block = - boost::get>(&block_var)->value; - auto block_height = block->height(); + if (config_.mst_support) { + internal_server->append( + std::static_pointer_cast(mst_transport)); + } + IROHA_EXPECTED_TRY_GET_VALUE(internal_port, + internal_server->append(ordering_init->service) + .append(yac_init->getConsensusNetwork()) + .append(loader_init.service) + .run()); + log_->info("Internal server bound on port {}", internal_port); + + log_->info("===> iroha initialized"); + // initiate first round + auto block_query = storage->createBlockQuery(); + if (not block_query) { + return expected::makeError("Failed to create block query"); + } + auto block_var = + (*block_query)->getBlock((*block_query)->getTopBlockHeight()); + if (auto e = expected::resultToOptionalError(block_var)) { + return expected::makeError("Failed to get the top block: " + e->message); + } - auto peers = storage->createPeerQuery() | - [](auto &&peer_query) { return peer_query->getLedgerPeers(); }; - if (not peers) { - return 
expected::makeError("Failed to fetch ledger peers!"); - } + auto &block = + boost::get>(&block_var)->value; + auto block_height = block->height(); - auto initial_ledger_state = std::make_shared( - std::move(peers.value()), block->height(), block->hash()); + auto peers = storage->createPeerQuery() | + [](auto &&peer_query) { return peer_query->getLedgerPeers(); }; + if (not peers) { + return expected::makeError("Failed to fetch ledger peers!"); + } - pcs->onSynchronization().subscribe( - ordering_init.sync_event_notifier.get_subscriber()); - storage->on_commit().subscribe( - ordering_init.commit_notifier.get_subscriber()); + auto initial_ledger_state = storage->getLedgerState(); + if (not initial_ledger_state) { + return expected::makeError("Failed to fetch ledger state!"); + } - ordering_init.commit_notifier.get_subscriber().on_next(std::move(block)); + ordering_init->processCommittedBlock(std::move(block)); + + subscription_engine_->dispatcher()->add( + iroha::SubscriptionEngineHandlers::kYac, + [synchronizer(utils::make_weak(synchronizer)), + ordering_init(utils::make_weak(ordering_init)), + yac_init(utils::make_weak(yac_init)), + log(utils::make_weak(log_)), + subscription(utils::make_weak(getSubscription())), + block_height, + initial_ledger_state] { + auto maybe_synchronizer = synchronizer.lock(); + auto maybe_ordering_init = ordering_init.lock(); + auto maybe_yac_init = yac_init.lock(); + auto maybe_log = log.lock(); + auto maybe_subscription = subscription.lock(); + if (maybe_synchronizer and maybe_ordering_init and maybe_yac_init + and maybe_log and maybe_subscription) { + ProcessGateObjectContext context{std::move(maybe_synchronizer), + std::move(maybe_ordering_init), + std::move(maybe_yac_init), + std::move(maybe_log), + std::move(maybe_subscription)}; + consensus::Round initial_round{block_height, + ordering::kFirstRejectRound}; + auto round_switch = + context.ordering_init->processSynchronizationEvent( + {SynchronizationOutcomeType::kCommit, + 
initial_round, + *initial_ledger_state}); + if (auto maybe_object = context.yac_init->processRoundSwitch( + round_switch.next_round, round_switch.ledger_state)) { + auto round = [](auto &object) { return object.round; }; + context.log->info("Ignoring object with {} because {} is newer", + initial_round, + std::visit(round, *maybe_object)); + return processGateObject(std::move(context), *maybe_object); + } + context.ordering_init->processRoundSwitch(round_switch); + } + }); - ordering_init.sync_event_notifier.get_subscriber().on_next( - synchronizer::SynchronizationEvent{ - SynchronizationOutcomeType::kCommit, - {block_height, ordering::kFirstRejectRound}, - initial_ledger_state}); - return {}; - }; + return {}; } diff --git a/irohad/main/application.hpp b/irohad/main/application.hpp index 556eb5eac06..7b4c9b1fda4 100644 --- a/irohad/main/application.hpp +++ b/irohad/main/application.hpp @@ -17,24 +17,30 @@ #include "logger/logger_fwd.hpp" #include "logger/logger_manager_fwd.hpp" #include "main/impl/block_loader_init.hpp" -#include "main/impl/on_demand_ordering_init.hpp" #include "main/iroha_conf_loader.hpp" #include "main/server_runner.hpp" #include "main/startup_params.hpp" +#include "main/subscription_fwd.hpp" #include "multi_sig_transactions/gossip_propagation_strategy_params.hpp" #include "torii/tls_params.hpp" +namespace google::protobuf { + class Empty; +} + namespace iroha { class PendingTransactionStorage; - class PendingTransactionStorageInit; class MstProcessor; + class MstStorage; namespace ametsuchi { class WsvRestorer; class TxPresenceCache; class Storage; class ReconnectionStrategyFactory; class PostgresOptions; + class RocksDbOptions; struct PoolWrapper; + struct RocksDBContext; class VmCaller; } // namespace ametsuchi namespace consensus { @@ -43,6 +49,8 @@ namespace iroha { } // namespace yac } // namespace consensus namespace network { + template + class AsyncGrpcClient; class BlockLoader; class ChannelPool; class GenericClientFactory; @@ -54,7 
+62,11 @@ namespace iroha { struct GrpcChannelParams; struct TlsCredentials; } // namespace network + namespace ordering { + class OnDemandOrderingInit; + } namespace protocol { + class Proposal; class Query; class BlocksQuery; } // namespace protocol @@ -70,7 +82,7 @@ namespace iroha { class CommandService; class CommandServiceTransportGrpc; class QueryService; - + class TransactionProcessor; struct TlsParams; } // namespace torii namespace validation { @@ -84,8 +96,12 @@ namespace shared_model { class Keypair; } namespace interface { + template + class AbstractTransportFactory; + class Proposal; class QueryResponseFactory; class TransactionBatchFactory; + class TransactionBatchParser; } // namespace interface namespace validation { struct Settings; @@ -98,56 +114,32 @@ class Irohad { /** * Constructor that initializes common iroha pipeline - * @param block_store_dir - folder where blocks will be stored * @param pg_opt - connection options for PostgresSQL * @param listen_ip - ip address for opening ports (internal & torii) - * @param torii_port - port for torii binding - * @param internal_port - port for internal communication - ordering service, - * consensus, and block loader - * @param max_proposal_size - maximum transactions that possible appears in - * one proposal - * @param proposal_delay - maximum waiting time util emitting new proposal - * @param vote_delay - waiting time before sending vote to next peer - * @param mst_expiration_time - maximum time until until MST transaction is * not considered as expired (in minutes) * @param keypair - public and private keys for crypto signer - * @param max_rounds_delay - maximum delay between consecutive rounds without - * transactions - * @param stale_stream_max_rounds - maximum number of rounds between - * consecutive status emissions - * @param opt_alternative_peers - optional alternative initial peers list * @param logger_manager - the logger manager to use * @param startup_wsv_data_policy - @see 
StartupWsvDataPolicy - * @param grpc_channel_params - parameters for all grpc clients + * @param maybe_grpc_channel_params - parameters for all grpc clients + * (optional). Default gRPC configuration is used if not provided * @param opt_mst_gossip_params - parameters for Gossip MST propagation * (optional). If not provided, disables mst processing support - * @param torii_tls_params - optional TLS params for torii. * @see iroha::torii::TlsParams * @param inter_peer_tls_config - set up TLS in peer-to-peer communication * TODO mboldyrev 03.11.2018 IR-1844 Refactor the constructor. */ - Irohad(const boost::optional &block_store_dir, + Irohad(const IrohadConfig &config, std::unique_ptr pg_opt, + std::unique_ptr rdb_opt, const std::string &listen_ip, - size_t torii_port, - size_t internal_port, - size_t max_proposal_size, - std::chrono::milliseconds proposal_delay, - std::chrono::milliseconds vote_delay, - std::chrono::minutes mst_expiration_time, - const shared_model::crypto::Keypair &keypair, - std::chrono::milliseconds max_rounds_delay, - size_t stale_stream_max_rounds, - boost::optional - opt_alternative_peers, + const boost::optional &keypair, logger::LoggerManagerTreePtr logger_manager, iroha::StartupWsvDataPolicy startup_wsv_data_policy, - std::shared_ptr - grpc_channel_params, + iroha::StartupWsvSynchronizationPolicy startup_wsv_sync_policy, + std::optional> + maybe_grpc_channel_params, const boost::optional - &opt_mst_gossip_params = boost::none, - const boost::optional &torii_tls_params = - boost::none, + &opt_mst_gossip_params, boost::optional inter_peer_tls_config = boost::none); @@ -185,7 +177,8 @@ class Irohad { protected: // -----------------------| component initialization |------------------------ virtual RunResult initStorage( - iroha::StartupWsvDataPolicy startup_wsv_data_policy); + iroha::StartupWsvDataPolicy startup_wsv_data_policy, + iroha::StorageType type); RunResult initTlsCredentials(); @@ -241,21 +234,12 @@ class Irohad { virtual RunResult 
initWsvRestorer(); // constructor dependencies - const boost::optional block_store_dir_; + IrohadConfig config_; const std::string listen_ip_; - size_t torii_port_; - boost::optional torii_tls_params_; - size_t internal_port_; - size_t max_proposal_size_; - std::chrono::milliseconds proposal_delay_; - std::chrono::milliseconds vote_delay_; - bool is_mst_supported_; - std::chrono::minutes mst_expiration_time_; - std::chrono::milliseconds max_rounds_delay_; - size_t stale_stream_max_rounds_; - const boost::optional - opt_alternative_peers_; - std::shared_ptr grpc_channel_params_; + boost::optional keypair_; + iroha::StartupWsvSynchronizationPolicy startup_wsv_sync_policy_; + std::optional> + maybe_grpc_channel_params_; boost::optional opt_mst_gossip_params_; boost::optional inter_peer_tls_config_; @@ -268,9 +252,6 @@ class Irohad { std::shared_ptr> peer_tls_certificates_provider_; - std::unique_ptr - pending_txs_storage_init; - // pending transactions storage std::shared_ptr pending_txs_storage_; @@ -280,18 +261,19 @@ class Irohad { // ------------------------| internal dependencies |------------------------- std::optional> vm_caller_; + std::shared_ptr db_context_; public: - shared_model::crypto::Keypair keypair; std::unique_ptr pg_opt_; + std::unique_ptr rdb_opt_; std::shared_ptr storage; protected: - rxcpp::observable finalized_txs_; + std::shared_ptr subscription_engine_; // initialization objects - iroha::ordering::OnDemandOrderingInit ordering_init; - std::unique_ptr yac_init; + std::shared_ptr ordering_init; + std::shared_ptr yac_init; iroha::network::BlockLoaderInit loader_init; // IR-907 14.09.2020 @lebdron: remove it from here @@ -382,10 +364,12 @@ class Irohad { std::shared_ptr status_bus_; // mst + std::shared_ptr mst_storage; std::shared_ptr mst_transport; std::shared_ptr mst_processor; // transaction service + std::shared_ptr tx_processor; std::shared_ptr command_service; std::shared_ptr command_service_transport; @@ -395,9 +379,6 @@ class Irohad { // 
consensus gate std::shared_ptr consensus_gate; - rxcpp::composite_subscription consensus_gate_objects_lifetime; - rxcpp::subjects::subject consensus_gate_objects; - rxcpp::composite_subscription consensus_gate_events_subscription; std::unique_ptr torii_server; boost::optional> diff --git a/irohad/main/impl/async_dispatcher.cpp b/irohad/main/impl/async_dispatcher.cpp new file mode 100644 index 00000000000..509e8fb2859 --- /dev/null +++ b/irohad/main/impl/async_dispatcher.cpp @@ -0,0 +1,18 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "main/subscription.hpp" + +#include "subscription/async_dispatcher_impl.hpp" + +namespace iroha { + + std::shared_ptr getDispatcher() { + return std::make_shared< + subscription::AsyncDispatcher>(); + } + +} // namespace iroha diff --git a/irohad/main/impl/consensus_init.cpp b/irohad/main/impl/consensus_init.cpp index 65708ae608c..2e9414de5d5 100644 --- a/irohad/main/impl/consensus_init.cpp +++ b/irohad/main/impl/consensus_init.cpp @@ -17,128 +17,143 @@ #include "consensus/yac/transport/impl/network_impl.hpp" #include "consensus/yac/yac.hpp" #include "logger/logger_manager.hpp" +#include "main/subscription.hpp" #include "network/impl/client_factory_impl.hpp" -using namespace iroha::consensus; -using namespace iroha::consensus::yac; +using iroha::consensus::yac::YacInit; namespace { - auto createPeerOrderer( - std::shared_ptr peer_query_factory) { - return std::make_shared(peer_query_factory); - } - auto createCryptoProvider(const shared_model::crypto::Keypair &keypair, logger::LoggerPtr log) { - auto crypto = std::make_shared(keypair, std::move(log)); + auto crypto = std::make_shared( + keypair, std::move(log)); return crypto; } auto createHashProvider() { - return std::make_shared(); + return std::make_shared(); + } + + auto createNetwork( + std::shared_ptr client_factory, + logger::LoggerPtr log) { + return std::make_shared( + std::make_unique>( + 
std::move(client_factory)), + log); } - std::shared_ptr createYac( - ClusterOrdering initial_order, - Round initial_round, + std::shared_ptr createYac( + shared_model::interface::types::PeerList initial_order, + iroha::consensus::Round initial_round, const shared_model::crypto::Keypair &keypair, - std::shared_ptr timer, - std::shared_ptr network, - ConsistencyModel consistency_model, - rxcpp::observe_on_one_worker coordination, + std::shared_ptr timer, + std::shared_ptr network, + iroha::consensus::yac::ConsistencyModel consistency_model, const logger::LoggerManagerTreePtr &consensus_log_manager) { std::shared_ptr cleanup_strategy = std::make_shared(); - return Yac::create( - YacVoteStorage(cleanup_strategy, - getSupermajorityChecker(consistency_model), - consensus_log_manager->getChild("VoteStorage")), + return iroha::consensus::yac::Yac::create( + iroha::consensus::yac::YacVoteStorage( + cleanup_strategy, + getSupermajorityChecker(consistency_model), + consensus_log_manager->getChild("VoteStorage")), std::move(network), createCryptoProvider( keypair, consensus_log_manager->getChild("Crypto")->getLogger()), std::move(timer), initial_order, initial_round, - coordination, consensus_log_manager->getChild("HashGate")->getLogger()); } } // namespace -namespace iroha { - namespace consensus { - namespace yac { - - std::shared_ptr YacInit::getConsensusNetwork() const { - BOOST_ASSERT_MSG(initialized_, - "YacInit::initConsensusGate(...) 
must be called prior " - "to YacInit::getConsensusNetwork()!"); - return consensus_network_; - } - - auto YacInit::createTimer(std::chrono::milliseconds delay_milliseconds) { - return std::make_shared( - delay_milliseconds, - // TODO 2019-04-10 andrei: IR-441 Share a thread between MST and YAC - rxcpp::observe_on_new_thread()); - } - - std::shared_ptr YacInit::initConsensusGate( - Round initial_round, - std::shared_ptr - peer_query_factory, - boost::optional - alternative_peers, - std::shared_ptr block_creator, - std::shared_ptr block_loader, - const shared_model::crypto::Keypair &keypair, - std::shared_ptr - consensus_result_cache, - std::chrono::milliseconds vote_delay_milliseconds, - std::shared_ptr< - iroha::network::AsyncGrpcClient> - async_call, - ConsistencyModel consistency_model, - const logger::LoggerManagerTreePtr &consensus_log_manager, - std::chrono::milliseconds delay, - std::shared_ptr - client_factory) { - auto peer_orderer = createPeerOrderer(peer_query_factory); - auto peers = peer_query_factory->createPeerQuery() | - [](auto &&peer_query) { return peer_query->getLedgerPeers(); }; - - consensus_network_ = std::make_shared( - async_call, - std::make_unique< - iroha::network::ClientFactoryImpl>( - std::move(client_factory)), - consensus_log_manager->getChild("Network")->getLogger()); - - auto yac = createYac(*ClusterOrdering::create(peers.value()), - initial_round, - keypair, - createTimer(vote_delay_milliseconds), - consensus_network_, - consistency_model, - rxcpp::observe_on_new_thread(), - consensus_log_manager); - consensus_network_->subscribe(yac); - - auto hash_provider = createHashProvider(); - - initialized_ = true; - - return std::make_shared( - std::move(yac), - std::move(peer_orderer), - alternative_peers | - [](auto &peers) { return ClusterOrdering::create(peers); }, - hash_provider, - block_creator, - std::move(consensus_result_cache), - consensus_log_manager->getChild("Gate")->getLogger(), - ConsensusOutcomeDelay(delay)); - } - } // 
namespace yac - } // namespace consensus -} // namespace iroha +std::shared_ptr +YacInit::getConsensusNetwork() const { + BOOST_ASSERT_MSG(initialized_, + "YacInit::initConsensusGate(...) must be called prior " + "to YacInit::getConsensusNetwork()!"); + return consensus_network_; +} + +void YacInit::subscribe(std::function callback) { + BOOST_ASSERT_MSG(initialized_, + "YacInit::initConsensusGate(...) must be called prior " + "to YacInit::subscribe()!"); + states_subscription_ = + SubscriberCreator>::template create< + EventTypes::kOnState>( + iroha::SubscriptionEngineHandlers::kYac, + [yac(utils::make_weak(yac_)), + yac_gate(utils::make_weak(yac_gate_)), + callback(std::move(callback))](auto, auto state) { + auto maybe_yac = yac.lock(); + auto maybe_yac_gate = yac_gate.lock(); + if (not(maybe_yac and maybe_yac_gate)) { + return; + } + auto maybe_answer = maybe_yac->onState(std::move(state)); + if (not maybe_answer) { + return; + } + auto maybe_outcome = + maybe_yac_gate->processOutcome(*std::move(maybe_answer)); + if (maybe_outcome) { + callback(*std::move(maybe_outcome)); + } + }); +} + +std::optional YacInit::processRoundSwitch( + consensus::Round const &round, + std::shared_ptr ledger_state) { + return yac_gate_->processRoundSwitch(round, std::move(ledger_state)); +} + +auto YacInit::createTimer(std::chrono::milliseconds delay_milliseconds) { + return std::make_shared(delay_milliseconds); +} + +std::shared_ptr YacInit::initConsensusGate( + Round initial_round, + std::optional alternative_peers, + std::shared_ptr ledger_state, + std::shared_ptr block_loader, + const shared_model::crypto::Keypair &keypair, + std::shared_ptr consensus_result_cache, + std::chrono::milliseconds vote_delay_milliseconds, + ConsistencyModel consistency_model, + const logger::LoggerManagerTreePtr &consensus_log_manager, + std::shared_ptr client_factory) { + consensus_network_ = std::make_shared( + consensus_log_manager->getChild("Service")->getLogger(), + [](std::vector state) { + 
getSubscription()->notify(EventTypes::kOnState, std::move(state)); + }); + + yac_ = createYac( + ledger_state->ledger_peers, + initial_round, + keypair, + createTimer(vote_delay_milliseconds), + createNetwork(client_factory, + consensus_log_manager->getChild("Network")->getLogger()), + consistency_model, + consensus_log_manager); + auto hash_provider = createHashProvider(); + + initialized_ = true; + + yac_gate_ = std::make_shared( + yac_, + std::make_shared(), + alternative_peers | + [](auto &peers) { return ClusterOrdering::create(peers); }, + std::move(ledger_state), + hash_provider, + std::move(consensus_result_cache), + consensus_log_manager->getChild("Gate")->getLogger()); + return yac_gate_; +} diff --git a/irohad/main/impl/consensus_init.hpp b/irohad/main/impl/consensus_init.hpp index f8000aea375..b6b1f446af4 100644 --- a/irohad/main/impl/consensus_init.hpp +++ b/irohad/main/impl/consensus_init.hpp @@ -8,61 +8,62 @@ #include -#include "ametsuchi/peer_query_factory.hpp" #include "consensus/consensus_block_cache.hpp" +#include "consensus/gate_object.hpp" #include "consensus/yac/consensus_outcome_type.hpp" #include "consensus/yac/consistency_model.hpp" #include "consensus/yac/outcome_messages.hpp" #include "consensus/yac/timer.hpp" -#include "consensus/yac/transport/impl/network_impl.hpp" +#include "consensus/yac/transport/impl/consensus_service_impl.hpp" #include "consensus/yac/yac_gate.hpp" #include "consensus/yac/yac_hash_provider.hpp" #include "consensus/yac/yac_peer_orderer.hpp" #include "cryptography/keypair.hpp" #include "logger/logger_manager_fwd.hpp" +#include "main/subscription_fwd.hpp" #include "network/block_loader.hpp" -#include "network/impl/async_grpc_client.hpp" -#include "simulator/block_creator.hpp" -namespace iroha { - namespace network { - class GenericClientFactory; - } - namespace consensus { - namespace yac { +namespace iroha::network { + class GenericClientFactory; +} - class YacInit { - public: - std::shared_ptr initConsensusGate( - 
Round initial_round, - // TODO 30.01.2019 lebdron: IR-262 Remove PeerQueryFactory - std::shared_ptr peer_query_factory, - boost::optional - alternative_peers, - std::shared_ptr block_creator, - std::shared_ptr block_loader, - const shared_model::crypto::Keypair &keypair, - std::shared_ptr block_cache, - std::chrono::milliseconds vote_delay_milliseconds, - std::shared_ptr< - iroha::network::AsyncGrpcClient> - async_call, - ConsistencyModel consistency_model, - const logger::LoggerManagerTreePtr &consensus_log_manager, - std::chrono::milliseconds delay, - std::shared_ptr - client_factory); +namespace iroha::consensus::yac { + class Yac; + class YacGateImpl; - std::shared_ptr getConsensusNetwork() const; + class YacInit { + public: + std::shared_ptr initConsensusGate( + Round initial_round, + std::optional + alternative_peers, + std::shared_ptr ledger_state, + std::shared_ptr block_loader, + const shared_model::crypto::Keypair &keypair, + std::shared_ptr block_cache, + std::chrono::milliseconds vote_delay_milliseconds, + ConsistencyModel consistency_model, + const logger::LoggerManagerTreePtr &consensus_log_manager, + std::shared_ptr client_factory); - private: - auto createTimer(std::chrono::milliseconds delay_milliseconds); + std::shared_ptr getConsensusNetwork() const; - bool initialized_{false}; - std::shared_ptr consensus_network_; - }; - } // namespace yac - } // namespace consensus -} // namespace iroha + void subscribe(std::function callback); + + std::optional processRoundSwitch( + consensus::Round const &round, + std::shared_ptr ledger_state); + + private: + auto createTimer(std::chrono::milliseconds delay_milliseconds); + + bool initialized_{false}; + std::shared_ptr consensus_network_; + std::shared_ptr yac_; + std::shared_ptr yac_gate_; + std::shared_ptr>> + states_subscription_; + }; +} // namespace iroha::consensus::yac #endif // IROHA_CONSENSUS_INIT_HPP diff --git a/irohad/main/impl/on_demand_ordering_init.cpp 
b/irohad/main/impl/on_demand_ordering_init.cpp index d6531dc8007..d4d308e8a24 100644 --- a/irohad/main/impl/on_demand_ordering_init.cpp +++ b/irohad/main/impl/on_demand_ordering_init.cpp @@ -5,16 +5,11 @@ #include "main/impl/on_demand_ordering_init.hpp" -#include -#include -#include -#include -#include -#include #include "common/permutation_generator.hpp" #include "interfaces/iroha_internal/block.hpp" #include "logger/logger.hpp" #include "logger/logger_manager.hpp" +#include "main/subscription.hpp" #include "network/impl/client_factory_impl.hpp" #include "ordering/impl/on_demand_common.hpp" #include "ordering/impl/on_demand_connection_manager.hpp" @@ -22,251 +17,73 @@ #include "ordering/impl/on_demand_ordering_service_impl.hpp" #include "ordering/impl/on_demand_os_client_grpc.hpp" #include "ordering/impl/on_demand_os_server_grpc.hpp" -#include "ordering/impl/ordering_gate_cache/on_demand_cache.hpp" #include "synchronizer/synchronizer_common.hpp" -using namespace iroha::ordering; +using iroha::ordering::OnDemandOrderingInit; namespace { /// indexes to permutations for corresponding rounds - enum RoundType { kCurrentRound, kNextRound, kRoundAfterNext, kCount }; + enum RoundType { kCurrentRound, kNextRound, kCount }; template using RoundTypeConstant = std::integral_constant; } // namespace OnDemandOrderingInit::OnDemandOrderingInit(logger::LoggerPtr log) - : sync_event_notifier(sync_event_notifier_lifetime_), - commit_notifier(commit_notifier_lifetime_), - log_(std::move(log)) {} + : log_(std::move(log)) {} /** * Creates notification factory for individual connections to peers with * gRPC backend. 
\see initOrderingGate for parameters */ auto createNotificationFactory( - std::shared_ptr> - async_call, std::shared_ptr proposal_transport_factory, std::chrono::milliseconds delay, const logger::LoggerManagerTreePtr &ordering_log_manager, std::shared_ptr client_factory) { - return std::make_shared( - std::move(async_call), + return std::make_shared< + iroha::ordering::transport::OnDemandOsClientGrpcFactory>( std::move(proposal_transport_factory), [] { return std::chrono::system_clock::now(); }, delay, ordering_log_manager->getChild("NetworkClient")->getLogger(), std::make_unique>( - std::move(client_factory))); + iroha::ordering::transport::OnDemandOsClientGrpcFactory::Service>>( + std::move(client_factory)), + [](iroha::ordering::ProposalEvent event) { + iroha::getSubscription()->notify(iroha::EventTypes::kOnProposalResponse, + std::move(event)); + }); } auto OnDemandOrderingInit::createConnectionManager( - std::shared_ptr> - async_call, std::shared_ptr proposal_transport_factory, std::chrono::milliseconds delay, - std::vector initial_hashes, const logger::LoggerManagerTreePtr &ordering_log_manager, std::shared_ptr client_factory) { - // since top block will be the first in commit_notifier observable, - // hashes of two previous blocks are prepended - const size_t kBeforePreviousTop = 0, kPreviousTop = 1; - - // flat map hashes from committed blocks - rxcpp::observable> - blocks = commit_notifier.get_observable(); - rxcpp::observable block_hashes = blocks.map( - [](std::shared_ptr const &block) { - return block->hash(); - }); - // prepend hashes for the first two rounds - rxcpp::observable all_hashes = - block_hashes.start_with(initial_hashes.at(kBeforePreviousTop), - initial_hashes.at(kPreviousTop)); - - // emit last k + 1 hashes, where k is the delay parameter - // current implementation assumes k = 2 - // first hash is used for kCurrentRound - // second hash is used for kNextRound - // third hash is used for kRoundAfterNext - rxcpp::observable 
hashes_without_first = - all_hashes.skip(1); - rxcpp::observable hashes_without_first_two = - all_hashes.skip(2); - rxcpp::observable> - latest_hashes = - all_hashes.zip(hashes_without_first, hashes_without_first_two); - - auto map_peers = - [this](auto &&latest_data) -> OnDemandConnectionManager::CurrentPeers { - auto &latest_commit = std::get<0>(latest_data); - auto ¤t_hashes = std::get<1>(latest_data); - - iroha::consensus::Round current_round = latest_commit.round; - - auto ¤t_peers = latest_commit.ledger_state->ledger_peers; - - /// permutations for peers lists - std::array, kCount> permutations; - - // generate permutation of peers list from corresponding round - // hash - auto generate_permutation = [&](auto round) { - auto &hash = std::get(current_hashes); - log_->debug("Using hash: {}", hash.toString()); - - auto prng = iroha::makeSeededPrng(hash.blob().data(), hash.blob().size()); - iroha::generatePermutation( - permutations[round()], std::move(prng), current_peers.size()); - }; - - generate_permutation(RoundTypeConstant{}); - generate_permutation(RoundTypeConstant{}); - generate_permutation(RoundTypeConstant{}); - - using iroha::synchronizer::SynchronizationOutcomeType; - switch (latest_commit.sync_outcome) { - case SynchronizationOutcomeType::kCommit: - current_round = nextCommitRound(current_round); - break; - case SynchronizationOutcomeType::kReject: - case SynchronizationOutcomeType::kNothing: - current_round = nextRejectRound(current_round); - break; - default: - BOOST_ASSERT_MSG(false, "Unknown value"); - } - - auto getOsPeer = [&](auto block_round_advance, auto reject_round) { - auto &permutation = permutations[block_round_advance]; - // since reject round can be greater than number of peers, wrap it - // with number of peers - auto &peer = - current_peers[permutation[reject_round % permutation.size()]]; - log_->debug( - "For {}, using OS on peer: {}", - iroha::consensus::Round{ - current_round.block_round + block_round_advance, reject_round}, - 
*peer); - return peer; - }; - - OnDemandConnectionManager::CurrentPeers peers; - /* - * See detailed description in - * irohad/ordering/impl/on_demand_connection_manager.cpp - * - * 0 1 2 0 1 2 0 1 2 0 1 2 - * 0 o x v 0 o . . 0 o x . 0 o . . - * 1 . . . 1 x v . 1 v . . 1 x . . - * 2 . . . 2 . . . 2 . . . 2 v . . - * RejectReject CommitReject RejectCommit CommitCommit - * - * o - current round, x - next round, v - target round - * - * v, round 0,2 - kRejectRejectConsumer - * v, round 1,1 - kCommitRejectConsumer - * v, round 1,0 - kRejectCommitConsumer - * v, round 2,0 - kCommitCommitConsumer - * o, round 0,0 - kIssuer - */ - peers.peers.at(OnDemandConnectionManager::kRejectRejectConsumer) = - getOsPeer(kCurrentRound, - currentRejectRoundConsumer(current_round.reject_round)); - peers.peers.at(OnDemandConnectionManager::kRejectCommitConsumer) = - getOsPeer(kNextRound, kNextCommitRoundConsumer); - peers.peers.at(OnDemandConnectionManager::kCommitRejectConsumer) = - getOsPeer(kNextRound, kNextRejectRoundConsumer); - peers.peers.at(OnDemandConnectionManager::kCommitCommitConsumer) = - getOsPeer(kRoundAfterNext, kNextCommitRoundConsumer); - peers.peers.at(OnDemandConnectionManager::kIssuer) = - getOsPeer(kCurrentRound, current_round.reject_round); - return peers; - }; - - rxcpp::observable sync_events = - sync_event_notifier.get_observable(); - rxcpp::observable>> - sync_events_with_hashes = sync_events.with_latest_from(latest_hashes); - rxcpp::observable peers = - sync_events_with_hashes.map(map_peers); - - return std::make_unique( - createNotificationFactory(std::move(async_call), - std::move(proposal_transport_factory), + connection_manager_ = std::make_unique( + createNotificationFactory(std::move(proposal_transport_factory), delay, ordering_log_manager, std::move(client_factory)), - peers, ordering_log_manager->getChild("ConnectionManager")->getLogger()); + return connection_manager_; } auto OnDemandOrderingInit::createGate( std::shared_ptr ordering_service, - 
std::unique_ptr network_client, + std::shared_ptr network_client, std::shared_ptr proposal_factory, std::shared_ptr tx_cache, - std::shared_ptr creation_strategy, size_t max_number_of_transactions, const logger::LoggerManagerTreePtr &ordering_log_manager) { return std::make_shared( std::move(ordering_service), std::move(network_client), - commit_notifier.get_observable().map( - [this](std::shared_ptr const - &block) - -> std::shared_ptr< - const cache::OrderingGateCache::HashesSetType> { - // take committed & rejected transaction hashes from committed - // block - log_->debug("Committed block handle: height {}.", block->height()); - auto hashes = - std::make_shared(); - for (shared_model::interface::Transaction const &tx : - block->transactions()) { - hashes->insert(tx.hash()); - } - for (shared_model::crypto::Hash const &hash : - block->rejected_transactions_hashes()) { - hashes->insert(hash); - } - return hashes; - }), - sync_event_notifier.get_observable().map( - [this](synchronizer::SynchronizationEvent const &event) { - consensus::Round current_round; - switch (event.sync_outcome) { - case iroha::synchronizer::SynchronizationOutcomeType::kCommit: - log_->debug("Sync event on {}: commit.", event.round); - current_round = ordering::nextCommitRound(event.round); - break; - case iroha::synchronizer::SynchronizationOutcomeType::kReject: - log_->debug("Sync event on {}: reject.", event.round); - current_round = ordering::nextRejectRound(event.round); - break; - case iroha::synchronizer::SynchronizationOutcomeType::kNothing: - log_->debug("Sync event on {}: nothing.", event.round); - current_round = ordering::nextRejectRound(event.round); - break; - default: - log_->error("unknown SynchronizationOutcomeType"); - assert(false); - } - return ordering::OnDemandOrderingGate::RoundSwitch{ - std::move(current_round), event.ledger_state}; - }), std::move(proposal_factory), std::move(tx_cache), - std::move(creation_strategy), max_number_of_transactions, 
ordering_log_manager->getChild("Gate")->getLogger()); } @@ -276,63 +93,174 @@ auto OnDemandOrderingInit::createService( std::shared_ptr proposal_factory, std::shared_ptr tx_cache, - std::shared_ptr creation_strategy, const logger::LoggerManagerTreePtr &ordering_log_manager) { - return std::make_shared( + ordering_service_ = std::make_shared( max_number_of_transactions, std::move(proposal_factory), std::move(tx_cache), - creation_strategy, ordering_log_manager->getChild("Service")->getLogger()); -} - -OnDemandOrderingInit::~OnDemandOrderingInit() { - sync_event_notifier_lifetime_.unsubscribe(); - commit_notifier_lifetime_.unsubscribe(); + return ordering_service_; } std::shared_ptr OnDemandOrderingInit::initOrderingGate( size_t max_number_of_transactions, std::chrono::milliseconds delay, - std::vector initial_hashes, std::shared_ptr transaction_factory, std::shared_ptr batch_parser, std::shared_ptr transaction_batch_factory, - std::shared_ptr> - async_call, std::shared_ptr proposal_factory, std::shared_ptr proposal_transport_factory, std::shared_ptr tx_cache, - std::shared_ptr creation_strategy, logger::LoggerManagerTreePtr ordering_log_manager, - std::shared_ptr client_factory) { + std::shared_ptr client_factory, + std::chrono::milliseconds proposal_creation_timeout) { auto ordering_service = createService(max_number_of_transactions, proposal_factory, tx_cache, - creation_strategy, ordering_log_manager); service = std::make_shared( ordering_service, std::move(transaction_factory), std::move(batch_parser), std::move(transaction_batch_factory), - ordering_log_manager->getChild("Server")->getLogger()); - return createGate( - ordering_service, - createConnectionManager(std::move(async_call), - std::move(proposal_transport_factory), - delay, - std::move(initial_hashes), - ordering_log_manager, - std::move(client_factory)), - std::move(proposal_factory), - std::move(tx_cache), - std::move(creation_strategy), - max_number_of_transactions, - ordering_log_manager); + 
ordering_log_manager->getChild("Server")->getLogger(), + proposal_creation_timeout); + ordering_gate_ = + createGate(ordering_service, + createConnectionManager(std::move(proposal_transport_factory), + delay, + ordering_log_manager, + std::move(client_factory)), + std::move(proposal_factory), + std::move(tx_cache), + max_number_of_transactions, + ordering_log_manager); + return ordering_gate_; +} + +iroha::ordering::RoundSwitch OnDemandOrderingInit::processSynchronizationEvent( + synchronizer::SynchronizationEvent event) { + iroha::consensus::Round current_round = event.round; + + auto ¤t_peers = event.ledger_state->ledger_peers; + + /// permutations for peers lists + std::array, kCount> permutations; + + // generate permutation of peers list from corresponding round + // hash + auto generate_permutation = [&](auto &hash, auto round) { + log_->debug("Using hash: {}", hash.toString()); + + auto prng = iroha::makeSeededPrng(hash.blob().data(), hash.blob().size()); + iroha::generatePermutation( + permutations[round()], std::move(prng), current_peers.size()); + }; + + generate_permutation(previous_hash_, RoundTypeConstant{}); + generate_permutation(current_hash_, RoundTypeConstant{}); + + using iroha::synchronizer::SynchronizationOutcomeType; + switch (event.sync_outcome) { + case SynchronizationOutcomeType::kCommit: + current_round = nextCommitRound(current_round); + break; + case SynchronizationOutcomeType::kReject: + case SynchronizationOutcomeType::kNothing: + current_round = nextRejectRound(current_round); + break; + default: + BOOST_ASSERT_MSG(false, "Unknown value"); + } + + auto getOsPeer = [&](auto block_round_advance, auto reject_round) { + auto &permutation = permutations[block_round_advance]; + // since reject round can be greater than number of peers, wrap it + // with number of peers + auto &peer = current_peers[permutation[reject_round % permutation.size()]]; + log_->debug( + "For {}, using OS on peer: {}", + 
iroha::consensus::Round{current_round.block_round + block_round_advance, + reject_round}, + *peer); + return peer; + }; + + OnDemandConnectionManager::CurrentPeers peers; + /* + * See detailed description in + * irohad/ordering/impl/on_demand_connection_manager.cpp + * + * 0 1 0 1 0 1 + * 0 o . 0 o x 0 o . + * 1 . . 1 . . 1 x . + * Issuer Reject Commit + * + * o - current round, x - next round, v - target round + * + * v, round 0,1 - kRejectConsumer + * v, round 1,0 - kCommitConsumer + * o, round 0,0 - kIssuer + */ + peers.peers.at(OnDemandConnectionManager::kRejectConsumer) = + getOsPeer(kCurrentRound, nextRejectRound(current_round).reject_round); + peers.peers.at(OnDemandConnectionManager::kCommitConsumer) = + getOsPeer(kNextRound, nextCommitRound(current_round).reject_round); + peers.peers.at(OnDemandConnectionManager::kIssuer) = + getOsPeer(kCurrentRound, current_round.reject_round); + + connection_manager_->initializeConnections(peers); + + return {std::move(current_round), event.ledger_state}; +} + +void OnDemandOrderingInit::processRoundSwitch( + iroha::ordering::RoundSwitch const &event) { + ordering_gate_->processRoundSwitch(event); +} + +void OnDemandOrderingInit::processCommittedBlock( + std::shared_ptr block) { + previous_hash_ = block->prevHash(); + current_hash_ = block->hash(); + + // take committed & rejected transaction hashes from committed block + log_->debug("Committed block handle: height {}.", block->height()); + auto hashes = std::make_shared(); + for (shared_model::interface::Transaction const &tx : block->transactions()) { + hashes->insert(tx.hash()); + } + for (shared_model::crypto::Hash const &hash : + block->rejected_transactions_hashes()) { + hashes->insert(hash); + } + ordering_service_->onTxsCommitted(*hashes); +} + +void OnDemandOrderingInit::subscribe( + std::function callback) { + proposals_subscription_ = + SubscriberCreator::template create< + EventTypes::kOnProposalResponse>( + iroha::SubscriptionEngineHandlers::kYac, + 
[ordering_gate(utils::make_weak(ordering_gate_)), + callback(std::move(callback))](auto, auto event) { + auto maybe_ordering_gate = ordering_gate.lock(); + if (not maybe_ordering_gate) { + return; + } + auto maybe_event = + maybe_ordering_gate->processProposalRequest(std::move(event)); + if (not maybe_event) { + return; + } + if (maybe_event) { + callback(*std::move(maybe_event)); + } + }); } diff --git a/irohad/main/impl/on_demand_ordering_init.hpp b/irohad/main/impl/on_demand_ordering_init.hpp index cb67384e251..2943ad2ded2 100644 --- a/irohad/main/impl/on_demand_ordering_init.hpp +++ b/irohad/main/impl/on_demand_ordering_init.hpp @@ -9,16 +9,12 @@ #include #include -#include +#include "cryptography/hash.hpp" #include "interfaces/common_objects/types.hpp" #include "logger/logger_fwd.hpp" #include "logger/logger_manager_fwd.hpp" - -namespace google { - namespace protobuf { - class Empty; - } -} // namespace google +#include "main/subscription_fwd.hpp" +#include "ordering/impl/round_switch.hpp" namespace grpc { class Service; @@ -40,8 +36,7 @@ namespace shared_model { namespace iroha { namespace network { class GenericClientFactory; - template - class AsyncGrpcClient; + struct OrderingEvent; class OrderingGate; } // namespace network namespace protocol { @@ -54,135 +49,127 @@ namespace iroha { namespace synchronizer { struct SynchronizationEvent; } - namespace ordering { - class OnDemandOrderingService; - class ProposalCreationStrategy; - namespace transport { - class OdOsNotification; - } - namespace cache { - class OrderingGateCache; - } +} // namespace iroha + +namespace iroha::ordering { + class OnDemandConnectionManager; + class OnDemandOrderingGate; + class OnDemandOrderingService; + class ProposalCreationStrategy; + struct ProposalEvent; + namespace transport { + class OdOsNotification; + } + /** + * Encapsulates initialization logic for on-demand ordering gate and service + */ + class OnDemandOrderingInit { + public: + using TransportFactoryType = + 
shared_model::interface::AbstractTransportFactory< + shared_model::interface::Proposal, + iroha::protocol::Proposal>; + + private: /** - * Encapsulates initialization logic for on-demand ordering gate and service + * Creates connection manager which redirects requests to appropriate + * ordering services in the current round. \see initOrderingGate for + * parameters */ - class OnDemandOrderingInit { - public: - using TransportFactoryType = - shared_model::interface::AbstractTransportFactory< - shared_model::interface::Proposal, - iroha::protocol::Proposal>; - - private: - /** - * Creates connection manager which redirects requests to appropriate - * ordering services in the current round. \see initOrderingGate for - * parameters - */ - auto createConnectionManager( - std::shared_ptr> - async_call, - std::shared_ptr proposal_transport_factory, - std::chrono::milliseconds delay, - std::vector initial_hashes, - const logger::LoggerManagerTreePtr &ordering_log_manager, - std::shared_ptr client_factory); - - /** - * Creates on-demand ordering gate. \see initOrderingGate for parameters - * TODO andrei 31.10.18 IR-1825 Refactor ordering gate observable - */ - auto createGate( - std::shared_ptr ordering_service, - std::unique_ptr network_client, - std::shared_ptr - proposal_factory, - std::shared_ptr tx_cache, - std::shared_ptr creation_strategy, - size_t max_number_of_transactions, - const logger::LoggerManagerTreePtr &ordering_log_manager); - - /** - * Creates on-demand ordering service. \see initOrderingGate for - * parameters - */ - auto createService( - size_t max_number_of_transactions, - std::shared_ptr - proposal_factory, - std::shared_ptr tx_cache, - std::shared_ptr creation_strategy, - const logger::LoggerManagerTreePtr &ordering_log_manager); - - rxcpp::composite_subscription sync_event_notifier_lifetime_; - rxcpp::composite_subscription commit_notifier_lifetime_; - - public: - /// Constructor. - /// @param log - the logger to use for internal messages. 
- OnDemandOrderingInit(logger::LoggerPtr log); - - ~OnDemandOrderingInit(); - - /** - * Initializes on-demand ordering gate and ordering sevice components - * - * @param max_number_of_transactions maximum number of transactions in a - * proposal - * @param delay timeout for ordering service response on proposal request - * @param initial_hashes seeds for peer list permutations for first k - * rounds they are required since hash of block i defines round i + k - * @param transaction_factory transport factory for transactions required - * by ordering service network endpoint - * @param batch_parser transaction batch parser required by ordering - * service network endpoint - * @param transaction_batch_factory transport factory for transaction - * batch candidates produced by parser - * @param async_call asynchronous gRPC client required for sending batches - * requests to ordering service and processing responses - * @param proposal_factory factory required by ordering service to produce - * proposals - * @param creation_strategy - provides a strategy for creating proposals - * in OS - * @param client_factory - a factory of client stubs - * @return initialized ordering gate - */ - std::shared_ptr initOrderingGate( - size_t max_number_of_transactions, - std::chrono::milliseconds delay, - std::vector initial_hashes, - std::shared_ptr> transaction_factory, - std::shared_ptr - batch_parser, - std::shared_ptr - transaction_batch_factory, - std::shared_ptr> - async_call, - std::shared_ptr - proposal_factory, - std::shared_ptr proposal_transport_factory, - std::shared_ptr tx_cache, - std::shared_ptr creation_strategy, - logger::LoggerManagerTreePtr ordering_log_manager, - std::shared_ptr client_factory); - - /// gRPC service for ordering service - std::shared_ptr service; - - /// commit notifier from peer communication service - rxcpp::subjects::subject - sync_event_notifier; - rxcpp::subjects::subject< - std::shared_ptr> - commit_notifier; - - private: - logger::LoggerPtr 
log_; - }; - } // namespace ordering -} // namespace iroha + auto createConnectionManager( + std::shared_ptr proposal_transport_factory, + std::chrono::milliseconds delay, + const logger::LoggerManagerTreePtr &ordering_log_manager, + std::shared_ptr client_factory); + + /** + * Creates on-demand ordering gate. \see initOrderingGate for parameters + * TODO andrei 31.10.18 IR-1825 Refactor ordering gate observable + */ + auto createGate( + std::shared_ptr ordering_service, + std::shared_ptr network_client, + std::shared_ptr + proposal_factory, + std::shared_ptr tx_cache, + size_t max_number_of_transactions, + const logger::LoggerManagerTreePtr &ordering_log_manager); + + /** + * Creates on-demand ordering service. \see initOrderingGate for + * parameters + */ + auto createService( + size_t max_number_of_transactions, + std::shared_ptr + proposal_factory, + std::shared_ptr tx_cache, + const logger::LoggerManagerTreePtr &ordering_log_manager); + + public: + /// Constructor. + /// @param log - the logger to use for internal messages. 
+ OnDemandOrderingInit(logger::LoggerPtr log); + + /** + * Initializes on-demand ordering gate and ordering sevice components + * + * @param max_number_of_transactions maximum number of transactions in a + * proposal + * @param delay timeout for ordering service response on proposal request + * @param transaction_factory transport factory for transactions required + * by ordering service network endpoint + * @param batch_parser transaction batch parser required by ordering + * service network endpoint + * @param transaction_batch_factory transport factory for transaction + * batch candidates produced by parser + * @param proposal_factory factory required by ordering service to produce + * proposals + * @param client_factory - a factory of client stubs + * @return initialized ordering gate + */ + std::shared_ptr initOrderingGate( + size_t max_number_of_transactions, + std::chrono::milliseconds delay, + std::shared_ptr> transaction_factory, + std::shared_ptr + batch_parser, + std::shared_ptr + transaction_batch_factory, + std::shared_ptr + proposal_factory, + std::shared_ptr proposal_transport_factory, + std::shared_ptr tx_cache, + logger::LoggerManagerTreePtr ordering_log_manager, + std::shared_ptr client_factory, + std::chrono::milliseconds proposal_creation_timeout); + + iroha::ordering::RoundSwitch processSynchronizationEvent( + synchronizer::SynchronizationEvent event); + + void processRoundSwitch(iroha::ordering::RoundSwitch const &event); + + void processCommittedBlock( + std::shared_ptr block); + + void subscribe( + std::function callback); + + /// gRPC service for ordering service + std::shared_ptr service; + + private: + shared_model::crypto::Hash previous_hash_, current_hash_; + logger::LoggerPtr log_; + std::shared_ptr ordering_service_; + std::shared_ptr connection_manager_; + std::shared_ptr ordering_gate_; + std::shared_ptr> + proposals_subscription_; + }; +} // namespace iroha::ordering #endif // IROHA_ON_DEMAND_ORDERING_INIT_HPP diff --git 
a/irohad/main/impl/pending_transaction_storage_init.cpp b/irohad/main/impl/pending_transaction_storage_init.cpp deleted file mode 100644 index 10f73aa6fa7..00000000000 --- a/irohad/main/impl/pending_transaction_storage_init.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "main/impl/pending_transaction_storage_init.hpp" - -#include -#include -#include "interfaces/iroha_internal/proposal.hpp" -#include "multi_sig_transactions/mst_processor.hpp" -#include "network/peer_communication_service.hpp" -#include "pending_txs_storage/impl/pending_txs_storage_impl.hpp" - -using namespace iroha; - -PendingTransactionStorageInit::PendingTransactionStorageInit() - : updated_batches(pending_storage_lifetime), - prepared_batch(pending_storage_lifetime), - expired_batch(pending_storage_lifetime), - prepared_txs(pending_storage_lifetime) {} - -std::shared_ptr -PendingTransactionStorageInit::createPendingTransactionsStorage() { - return PendingTransactionStorageImpl::create(updated_batches.get_observable(), - prepared_batch.get_observable(), - expired_batch.get_observable(), - prepared_txs.get_observable(), - finalized_txs.get_observable()); -} - -void PendingTransactionStorageInit::setMstSubscriptions( - const MstProcessor &mst_processor) { - mst_processor.onStateUpdate().subscribe(pending_storage_lifetime, - updated_batches.get_subscriber()); - mst_processor.onPreparedBatches().subscribe(pending_storage_lifetime, - prepared_batch.get_subscriber()); - mst_processor.onExpiredBatches().subscribe(pending_storage_lifetime, - expired_batch.get_subscriber()); -} - -void PendingTransactionStorageInit::setFinalizedTxsSubscription( - rxcpp::observable finalized_txs) { - finalized_txs.subscribe(pending_storage_lifetime, - this->finalized_txs.get_subscriber()); -} - -PendingTransactionStorageInit::~PendingTransactionStorageInit() { - pending_storage_lifetime.unsubscribe(); -} diff --git 
a/irohad/main/impl/pending_transaction_storage_init.hpp b/irohad/main/impl/pending_transaction_storage_init.hpp deleted file mode 100644 index 55a5a27464a..00000000000 --- a/irohad/main/impl/pending_transaction_storage_init.hpp +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_PENDING_TRANSACTION_STORAGE_INIT_HPP -#define IROHA_PENDING_TRANSACTION_STORAGE_INIT_HPP - -#include - -#include -#include "interfaces/common_objects/types.hpp" - -namespace shared_model { - namespace interface { - class TransactionBatch; - } -} // namespace shared_model - -namespace iroha { - - class MstProcessor; - class MstState; - class PendingTransactionStorage; - - namespace network { - class PeerCommunicationService; - } - - class PendingTransactionStorageInit { - public: - PendingTransactionStorageInit(); - - std::shared_ptr - createPendingTransactionsStorage(); - - void setMstSubscriptions(const MstProcessor &mst_processor); - - void setFinalizedTxsSubscription( - rxcpp::observable - finalized_txs); - - ~PendingTransactionStorageInit(); - - protected: - rxcpp::composite_subscription pending_storage_lifetime; - rxcpp::subjects::subject> updated_batches; - rxcpp::subjects::subject< - std::shared_ptr> - prepared_batch; - rxcpp::subjects::subject< - std::shared_ptr> - expired_batch; - rxcpp::subjects::subject< - std::pair> - prepared_txs; - rxcpp::subjects::subject - finalized_txs; - }; -} // namespace iroha - -#endif // IROHA_PENDING_TRANSACTION_STORAGE_INIT_HPP diff --git a/irohad/main/impl/pg_connection_init.cpp b/irohad/main/impl/pg_connection_init.cpp index 78bb08722bd..aa19d36cbf5 100644 --- a/irohad/main/impl/pg_connection_init.cpp +++ b/irohad/main/impl/pg_connection_init.cpp @@ -7,6 +7,7 @@ #include #include + #include "ametsuchi/impl/k_times_reconnection_strategy.hpp" #include "ametsuchi/impl/pool_wrapper.hpp" #include "common/irohad_version.hpp" @@ -19,6 +20,36 @@ namespace { 
/// Database connection pool size. Limits the number of similtaneous accesses. constexpr int kDbPoolSize = 10; + /// Prototypes + void prepareTables(soci::session &session); + bool preparedTransactionsAvailable(soci::session &sql); + iroha::expected::Result createSchema( + const PostgresOptions &postgres_options); + /** + * Function initializes existing connection pool + * @param connection_pool - pool with connections + * @param pool_size - number of connections in pool + * @param try_rollback - function which performs blocks rollback before + * initialization + * @param callback_factory - factory for reconnect callbacks + * @param reconnection_strategy_factory - factory which creates strategies + * for each connection + * @param pg_reconnection_options - parameter of connection startup on + * reconnect + * @param log_manager - log manager of storage + * @tparam RollbackFunction - type of rollback function + * @return void value on success or string error + */ + template + iroha::expected::Result initializeConnectionPool( + soci::connection_pool &connection_pool, + size_t pool_size, + RollbackFunction try_rollback, + FailoverCallbackHolder &callback_factory, + const ReconnectionStrategyFactory &reconnection_strategy_factory, + const std::string &pg_reconnection_options, + logger::LoggerManagerTreePtr log_manager); + std::string formatPostgresMessage(const char *message) { std::string formatted_message(message); boost::replace_if(formatted_message, boost::is_any_of("\r\n"), ' '); @@ -98,25 +129,6 @@ namespace { log->debug("{}", formatPostgresMessage(message)); } - iroha::expected::Result dropDatabaseIfExists( - soci::session &maintenance_sql, const std::string &db_name) { - try { - size_t count; - maintenance_sql - << "SELECT count(datname) FROM pg_catalog.pg_database WHERE " - "datname = :db_name", - soci::into(count), soci::use(db_name, "db_name"); - - if (count == 1) { - maintenance_sql << "DROP DATABASE " + db_name; - } - } catch (std::exception &e) { - return 
fmt::format( - "Dropping database '{}' failed: {}", db_name, e.what()); - } - return iroha::expected::Value(); - } - iroha::expected::Result, std::string> initPostgresConnection(std::string &options_str, size_t pool_size) { auto pool = std::make_shared(pool_size); @@ -131,178 +143,98 @@ namespace { } return iroha::expected::makeValue(pool); } -} // namespace - -iroha::expected::Result, - std::string> -PgConnectionInit::init(StartupWsvDataPolicy startup_wsv_data_policy, - iroha::ametsuchi::PostgresOptions const &pg_opt, - logger::LoggerManagerTreePtr log_manager) { - return prepareWorkingDatabase(startup_wsv_data_policy, pg_opt) | [&] { - return prepareConnectionPool(KTimesReconnectionStrategyFactory{10}, - pg_opt, - kDbPoolSize, - log_manager); - }; -} - -iroha::expected::Result -PgConnectionInit::prepareWorkingDatabase( - StartupWsvDataPolicy startup_wsv_data_policy, - const PostgresOptions &options) { - return getMaintenanceSession(options) | [&](auto maintenance_sql) { - if (startup_wsv_data_policy == StartupWsvDataPolicy::kReuse) { - return isSchemaCompatible(options) | [&](bool is_compatible) - -> iroha::expected::Result { - if (not is_compatible) { - return "The schema is not compatible. 
" - "Either overwrite the ledger or use a compatible binary " - "version."; - } - return iroha::expected::Value{}; - }; - } - return dropWorkingDatabase(options) | [&] { return createSchema(options); }; - }; -} - -iroha::expected::Result, std::string> -PgConnectionInit::prepareConnectionPool( - const ReconnectionStrategyFactory &reconnection_strategy_factory, - const PostgresOptions &options, - const int pool_size, - logger::LoggerManagerTreePtr log_manager) { - auto options_str = options.workingConnectionString(); - - auto conn = initPostgresConnection(options_str, pool_size); - if (auto e = boost::get>(&conn)) { - return *e; - } - auto &connection = - boost::get>>(conn) - .value; - - soci::session sql(*connection); - bool enable_prepared_transactions = preparedTransactionsAvailable(sql); - try { - auto try_rollback = [&](soci::session &session) { - if (enable_prepared_transactions) { - rollbackPrepared(session, options.preparedBlockName()) - .match([](auto &&v) {}, - [&](auto &&e) { - log_manager->getLogger()->warn( - "rollback on creation has failed: {}", e.error); - }); - } + template + iroha::expected::Result initializeConnectionPool( + soci::connection_pool &connection_pool, + size_t pool_size, + RollbackFunction try_rollback, + FailoverCallbackHolder &callback_factory, + const ReconnectionStrategyFactory &reconnection_strategy_factory, + const std::string &pg_reconnection_options, + logger::LoggerManagerTreePtr log_manager) { + auto log = log_manager->getLogger(); + auto initialize_session = + [&](soci::session &session, auto on_init_db, auto on_init_connection) { + auto *backend = static_cast( + session.get_backend()); + PQsetNoticeProcessor(backend->conn_, &processPqNotice, log.get()); + on_init_connection(session); + + // TODO: 2019-05-06 @muratovv rework unhandled exception with Result + // IR-464 + on_init_db(session); + }; + + /// lambda contains special actions which should be execute once + auto init_db = [&](soci::session &session) { + // rollback 
current prepared transaction + // if there exists any since last session + try_rollback(session); }; - std::unique_ptr failover_callback_factory = - std::make_unique(); + /// lambda contains actions which should be invoked once for each + /// session + auto init_failover_callback = [&](soci::session &session) { + static size_t connection_index = 0; + auto restore_session = [initialize_session](soci::session &s) { + return initialize_session(s, [](auto &) {}, [](auto &) {}); + }; - return initializeConnectionPool(*connection, - pool_size, - try_rollback, - *failover_callback_factory, - reconnection_strategy_factory, - options_str, - log_manager) - | [&]() -> iroha::expected::Result, - std::string> { - return std::make_shared( - std::move(connection), - std::move(failover_callback_factory), - enable_prepared_transactions); + auto &callback = callback_factory.makeFailoverCallback( + session, + restore_session, + pg_reconnection_options, + reconnection_strategy_factory.create(), + log_manager + ->getChild("SOCI connection " + + std::to_string(connection_index++)) + ->getLogger()); + + session.set_failover_callback(callback); }; - } catch (const std::exception &e) { - return expected::makeError(e.what()); - } -} + assert(pool_size > 0); -bool PgConnectionInit::preparedTransactionsAvailable(soci::session &sql) { - int prepared_txs_count = 0; - try { - sql << "SHOW max_prepared_transactions;", soci::into(prepared_txs_count); - return prepared_txs_count != 0; - } catch (std::exception &e) { - return false; + initialize_session(connection_pool.at(0), init_db, init_failover_callback); + for (size_t i = 1; i != pool_size; i++) { + soci::session &session = connection_pool.at(i); + initialize_session(session, [](auto &) {}, init_failover_callback); + } + return iroha::expected::Value(); } -} -iroha::expected::Result PgConnectionInit::rollbackPrepared( - soci::session &sql, const std::string &prepared_block_name) { - try { - sql << "ROLLBACK PREPARED '" + prepared_block_name + 
"';"; - } catch (const std::exception &e) { - return iroha::expected::makeError(formatPostgresMessage(e.what())); + iroha::expected::Result createSchema( + const PostgresOptions &postgres_options) { + try { + return getMaintenanceSession(postgres_options) | + [&](auto maintenance_sql) { + *maintenance_sql << fmt::format("create database {};", + postgres_options.workingDbName()); + return getWorkingDbSession(postgres_options) | [](auto session) + -> iroha::expected::Result { + prepareTables(*session); + return iroha::expected::Value{}; + }; + }; + } catch (const std::exception &e) { + return e.what(); + } } - return {}; -} -template -iroha::expected::Result -PgConnectionInit::initializeConnectionPool( - soci::connection_pool &connection_pool, - size_t pool_size, - RollbackFunction try_rollback, - FailoverCallbackHolder &callback_factory, - const ReconnectionStrategyFactory &reconnection_strategy_factory, - const std::string &pg_reconnection_options, - logger::LoggerManagerTreePtr log_manager) { - auto log = log_manager->getLogger(); - auto initialize_session = [&](soci::session &session, - auto on_init_db, - auto on_init_connection) { - auto *backend = - static_cast(session.get_backend()); - PQsetNoticeProcessor(backend->conn_, &processPqNotice, log.get()); - on_init_connection(session); - - // TODO: 2019-05-06 @muratovv rework unhandled exception with Result - // IR-464 - on_init_db(session); - }; - - /// lambda contains special actions which should be execute once - auto init_db = [&](soci::session &session) { - // rollback current prepared transaction - // if there exists any since last session - try_rollback(session); - }; - - /// lambda contains actions which should be invoked once for each - /// session - auto init_failover_callback = [&](soci::session &session) { - static size_t connection_index = 0; - auto restore_session = [initialize_session](soci::session &s) { - return initialize_session(s, [](auto &) {}, [](auto &) {}); - }; - - auto &callback = 
callback_factory.makeFailoverCallback( - session, - restore_session, - pg_reconnection_options, - reconnection_strategy_factory.create(), - log_manager - ->getChild("SOCI connection " + std::to_string(connection_index++)) - ->getLogger()); - - session.set_failover_callback(callback); - }; - - assert(pool_size > 0); - - initialize_session(connection_pool.at(0), init_db, init_failover_callback); - for (size_t i = 1; i != pool_size; i++) { - soci::session &session = connection_pool.at(i); - initialize_session(session, [](auto &) {}, init_failover_callback); + bool preparedTransactionsAvailable(soci::session &sql) { + int prepared_txs_count = 0; + try { + sql << "SHOW max_prepared_transactions;", soci::into(prepared_txs_count); + return prepared_txs_count != 0; + } catch (std::exception &e) { + return false; + } } - return expected::Value(); -} -void PgConnectionInit::prepareTables(soci::session &session) { - static const std::string prepare_tables_sql = R"( + void prepareTables(soci::session &session) { + static const std::string prepare_tables_sql = + R"( CREATE TABLE schema_version ( lock CHAR(1) DEFAULT 'X' NOT NULL PRIMARY KEY, iroha_major int not null, @@ -312,12 +244,12 @@ CREATE TABLE schema_version ( insert into schema_version (iroha_major, iroha_minor, iroha_patch) values ()" - + - [] { - auto v = iroha::getIrohadVersion(); - return fmt::format("{}, {}, {}", v.major, v.minor, v.patch); - }() - + R"(); + + + [] { + auto v = iroha::getIrohadVersion(); + return fmt::format("{}, {}, {}", v.major, v.minor, v.patch); + }() + + R"(); CREATE TABLE top_block_info ( lock CHAR(1) DEFAULT 'X' NOT NULL PRIMARY KEY, height int, @@ -369,8 +301,8 @@ CREATE TABLE account_has_asset ( CREATE TABLE role_has_permissions ( role_id character varying(32) NOT NULL REFERENCES role, permission bit()" - + std::to_string(shared_model::interface::RolePermissionSet::size()) - + R"() NOT NULL, + + std::to_string(shared_model::interface::RolePermissionSet::size()) + + R"() NOT NULL, PRIMARY 
KEY (role_id) ); CREATE TABLE account_has_roles ( @@ -382,8 +314,9 @@ CREATE TABLE account_has_grantable_permissions ( permittee_account_id character varying(288) NOT NULL REFERENCES account, account_id character varying(288) NOT NULL REFERENCES account, permission bit()" - + std::to_string(shared_model::interface::GrantablePermissionSet::size()) - + R"() NOT NULL, + + std::to_string( + shared_model::interface::GrantablePermissionSet::size()) + + R"() NOT NULL, PRIMARY KEY (permittee_account_id, account_id) ); CREATE TABLE IF NOT EXISTS tx_positions ( @@ -452,34 +385,129 @@ CREATE INDEX IF NOT EXISTS burrow_tx_logs_topics_log_idx USING btree (log_idx ASC); )"; - session << prepare_tables_sql; -} + session << prepare_tables_sql; + } +} // namespace -iroha::expected::Result -PgConnectionInit::dropWorkingDatabase(const PostgresOptions &options) { - return getMaintenanceSession(options) | [&](auto maintenance_sql) - -> iroha::expected::Result { - return dropDatabaseIfExists(*maintenance_sql, options.workingDbName()); +iroha::expected::Result, + std::string> +PgConnectionInit::init(StartupWsvDataPolicy startup_wsv_data_policy, + iroha::ametsuchi::PostgresOptions const &pg_opt, + logger::LoggerManagerTreePtr log_manager) { + return prepareWorkingDatabase(startup_wsv_data_policy, pg_opt) | [&] { + return prepareConnectionPool(KTimesReconnectionStrategyFactory{10}, + pg_opt, + kDbPoolSize, + log_manager); }; } -iroha::expected::Result PgConnectionInit::createSchema( - const PostgresOptions &postgres_options) { - try { - return getMaintenanceSession(postgres_options) | [&](auto maintenance_sql) { - *maintenance_sql << fmt::format("create database {};", - postgres_options.workingDbName()); - return getWorkingDbSession(postgres_options) | [](auto session) +iroha::expected::Result +PgConnectionInit::prepareWorkingDatabase( + StartupWsvDataPolicy startup_wsv_data_policy, + const PostgresOptions &options) { + return getMaintenanceSession(options) | [&](auto maintenance_sql) { + 
int work_db_exists; + *maintenance_sql << "select exists(" + "SELECT datname FROM pg_catalog.pg_database " + "WHERE datname = '" + + options.workingDbName() + "');", + soci::into(work_db_exists); + if (not work_db_exists) { + return createSchema(options); + } + if (startup_wsv_data_policy == StartupWsvDataPolicy::kDrop) { + return dropWorkingDatabase(options) | + [&] { return createSchema(options); }; + } else { // StartupWsvDataPolicy::kReuse + return isSchemaCompatible(options) | [&](bool is_compatible) -> iroha::expected::Result { - prepareTables(*session); + if (not is_compatible) { + return "The schema is not compatible. " + "Either overwrite the ledger or use a compatible binary " + "version."; + } return iroha::expected::Value{}; }; + } + }; +} + +iroha::expected::Result, std::string> +PgConnectionInit::prepareConnectionPool( + const ReconnectionStrategyFactory &reconnection_strategy_factory, + const PostgresOptions &options, + const int pool_size, + logger::LoggerManagerTreePtr log_manager) { + auto options_str = options.workingConnectionString(); + + auto conn = initPostgresConnection(options_str, pool_size); + if (auto e = boost::get>(&conn)) { + return *e; + } + + auto &connection = + boost::get>>(conn) + .value; + + soci::session sql(*connection); + bool enable_prepared_transactions = preparedTransactionsAvailable(sql); + try { + auto try_rollback = [&](soci::session &session) { + if (enable_prepared_transactions) { + rollbackPrepared(session, options.preparedBlockName()) + .match([](auto &&v) {}, + [&](auto &&e) { + log_manager->getLogger()->warn( + "rollback on creation has failed: {}", e.error); + }); + } }; + + std::unique_ptr failover_callback_factory = + std::make_unique(); + + return initializeConnectionPool(*connection, + pool_size, + try_rollback, + *failover_callback_factory, + reconnection_strategy_factory, + options_str, + log_manager) + | [&]() -> iroha::expected::Result, + std::string> { + return std::make_shared( + std::move(connection), 
+ std::move(failover_callback_factory), + enable_prepared_transactions); + }; + } catch (const std::exception &e) { - return e.what(); + return expected::makeError(e.what()); } } +iroha::expected::Result PgConnectionInit::rollbackPrepared( + soci::session &sql, const std::string &prepared_block_name) { + try { + sql << "ROLLBACK PREPARED '" + prepared_block_name + "';"; + } catch (const std::exception &e) { + return iroha::expected::makeError(formatPostgresMessage(e.what())); + } + return {}; +} + +iroha::expected::Result +PgConnectionInit::dropWorkingDatabase(const PostgresOptions &options) try { + auto maintenance_sql = soci::session(*soci::factory_postgresql(), + options.maintenanceConnectionString()); + maintenance_sql << "DROP DATABASE IF EXISTS " << options.workingDbName() + << ";"; + return iroha::expected::Value{}; +} catch (const std::exception &e) { + return e.what(); +} + iroha::expected::Result PgConnectionInit::resetPeers( soci::session &sql) { try { diff --git a/irohad/main/impl/pg_connection_init.hpp b/irohad/main/impl/pg_connection_init.hpp index 23c1182e5ee..47254f3df4f 100644 --- a/irohad/main/impl/pg_connection_init.hpp +++ b/irohad/main/impl/pg_connection_init.hpp @@ -47,11 +47,6 @@ namespace iroha { const int pool_size, logger::LoggerManagerTreePtr log_manager); - /** - * Verify whether postgres supports prepared transactions - */ - static bool preparedTransactionsAvailable(soci::session &sql); - static iroha::expected::Result rollbackPrepared( soci::session &sql, const std::string &prepared_block_name); @@ -67,42 +62,6 @@ namespace iroha { * @return error message if reset has failed */ static expected::Result resetPeers(soci::session &sql); - - /// Create tables in the given session. Left public for tests. - static void prepareTables(soci::session &session); - - /** - * Creates schema. Working database must not exist when calling this. - * @return void value in case of success or an error message otherwise. 
- */ - static expected::Result createSchema( - const PostgresOptions &postgres_options); - - private: - /** - * Function initializes existing connection pool - * @param connection_pool - pool with connections - * @param pool_size - number of connections in pool - * @param try_rollback - function which performs blocks rollback before - * initialization - * @param callback_factory - factory for reconnect callbacks - * @param reconnection_strategy_factory - factory which creates strategies - * for each connection - * @param pg_reconnection_options - parameter of connection startup on - * reconnect - * @param log_manager - log manager of storage - * @tparam RollbackFunction - type of rollback function - * @return void value on success or string error - */ - template - static expected::Result initializeConnectionPool( - soci::connection_pool &connection_pool, - size_t pool_size, - RollbackFunction try_rollback, - FailoverCallbackHolder &callback_factory, - const ReconnectionStrategyFactory &reconnection_strategy_factory, - const std::string &pg_reconnection_options, - logger::LoggerManagerTreePtr log_manager); }; } // namespace ametsuchi } // namespace iroha diff --git a/irohad/main/impl/rocksdb_connection_init.cpp b/irohad/main/impl/rocksdb_connection_init.cpp new file mode 100644 index 00000000000..24f5a996caf --- /dev/null +++ b/irohad/main/impl/rocksdb_connection_init.cpp @@ -0,0 +1,122 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "main/impl/rocksdb_connection_init.hpp" + +#include +#include + +#include "common/irohad_version.hpp" +#include "logger/logger.hpp" +#include "logger/logger_manager.hpp" + +using namespace iroha::ametsuchi; + +namespace { + + /// WSV schema version is identified by compatibile irohad version. + using SchemaVersion = iroha::IrohadVersion; + + /** + * Checks schema compatibility. 
+ * @return value of true if the schema in the provided database is + * compatible with this code, false if not and an error message if the + * check could not be performed. + */ + iroha::expected::Result isSchemaCompatible( + RocksDbCommon &common, const RocksDbOptions &options) { + RDB_TRY_GET_VALUE_OR_STR_ERR( + version, + forWSVVersion(common)); + return *version == iroha::getIrohadVersion(); + } + + iroha::expected::Result createSchema( + RocksDbCommon &common, const RocksDbOptions &options) { + auto const version = iroha::getIrohadVersion(); + common.valueBuffer() = std::to_string(version.major); + common.valueBuffer() += '#'; + common.valueBuffer() += std::to_string(version.minor); + common.valueBuffer() += '#'; + common.valueBuffer() += std::to_string(version.patch); + + RDB_ERROR_CHECK_TO_STR(forStoreVersion(common)); + RDB_ERROR_CHECK_TO_STR(forWSVVersion(common)); + + return {}; + } + +} // namespace + +iroha::expected::Result, std::string> +RdbConnectionInit::init(StartupWsvDataPolicy startup_wsv_data_policy, + iroha::ametsuchi::RocksDbOptions const &opt, + logger::LoggerManagerTreePtr log_manager) { + return prepareWorkingDatabase(startup_wsv_data_policy, opt); +} + +iroha::expected::Result, std::string> +RdbConnectionInit::prepareWorkingDatabase( + StartupWsvDataPolicy startup_wsv_data_policy, + const iroha::ametsuchi::RocksDbOptions &options) { + auto port = std::make_shared(); + if (auto result = port->initialize(options.dbPath()); + expected::hasError(result)) + return expected::makeError( + fmt::format("Initialize db failed. Error code: {}, description: {}", + result.assumeError().code, + result.assumeError().description)); + + auto db_context = std::make_shared(port); + RocksDbCommon common(db_context); + + std::optional wsv_version; + if (auto result = + forWSVVersion(common); + expected::hasError(result)) + return expected::makeError( + fmt::format("Request schema failed. 
Error code: {}, description: {}", + result.assumeError().code, + result.assumeError().description)); + else + wsv_version = std::move(result.assumeValue()); + + std::optional store_version; + if (auto result = + forStoreVersion(common); + expected::hasError(result)) + return expected::makeError( + fmt::format("Request schema failed. Error code: {}, description: {}", + result.assumeError().code, + result.assumeError().description)); + else + store_version = std::move(result.assumeValue()); + + if (!wsv_version || !store_version + || startup_wsv_data_policy == StartupWsvDataPolicy::kDrop) { + RDB_ERROR_CHECK(dropWorkingDatabase(common, options)); + RDB_ERROR_CHECK(createSchema(common, options)); + common.commit(); + return port; + } + + return isSchemaCompatible(common, options) | [port](bool is_compatible) + -> iroha::expected::Result, + std::string> { + if (not is_compatible) { + return "The schema is not compatible. " + "Either overwrite the ledger or use a compatible binary " + "version."; + } + return port; + }; +} + +iroha::expected::Result +RdbConnectionInit::dropWorkingDatabase( + RocksDbCommon &common, const iroha::ametsuchi::RocksDbOptions &options) { + RDB_ERROR_CHECK_TO_STR(dropWSV(common)); + return {}; +} diff --git a/irohad/main/impl/rocksdb_connection_init.hpp b/irohad/main/impl/rocksdb_connection_init.hpp new file mode 100644 index 00000000000..e26450efc30 --- /dev/null +++ b/irohad/main/impl/rocksdb_connection_init.hpp @@ -0,0 +1,48 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_RDB_CONNECTION_INIT_HPP +#define IROHA_RDB_CONNECTION_INIT_HPP + +#include +#include + +#include "ametsuchi/impl/failover_callback_holder.hpp" +#include "ametsuchi/impl/rocksdb_command_executor.hpp" +#include "ametsuchi/impl/rocksdb_options.hpp" +#include "common/result.hpp" +#include "interfaces/permissions.hpp" +#include "logger/logger_fwd.hpp" +#include "logger/logger_manager_fwd.hpp" +#include "main/startup_params.hpp" + +namespace iroha::ametsuchi { + + struct RocksDBPort; + struct RocksDBContext; + class RocksDbCommon; + + class RdbConnectionInit { + public: + static expected::Result, std::string> init( + StartupWsvDataPolicy startup_wsv_data_policy, + iroha::ametsuchi::RocksDbOptions const &opt, + logger::LoggerManagerTreePtr log_manager); + + static expected::Result, std::string> + prepareWorkingDatabase(StartupWsvDataPolicy startup_wsv_data_policy, + const iroha::ametsuchi::RocksDbOptions &options); + + /* + * Drop working database. + * @return Error message if dropping has failed. 
+ */ + static expected::Result dropWorkingDatabase( + RocksDbCommon &common, const iroha::ametsuchi::RocksDbOptions &options); + }; + +} // namespace iroha::ametsuchi + +#endif // IROHA_RDB_CONNECTION_INIT_HPP diff --git a/irohad/main/impl/storage_init.cpp b/irohad/main/impl/storage_init.cpp index 84e0b41590e..cf1505335bd 100644 --- a/irohad/main/impl/storage_init.cpp +++ b/irohad/main/impl/storage_init.cpp @@ -9,23 +9,26 @@ #include #include "ametsuchi/impl/flat_file_block_storage.hpp" +#include "ametsuchi/impl/in_memory_block_storage_factory.hpp" #include "ametsuchi/impl/pool_wrapper.hpp" #include "ametsuchi/impl/postgres_block_storage_factory.hpp" +#include "ametsuchi/impl/rocksdb_block_storage.hpp" +#include "ametsuchi/impl/rocksdb_block_storage_factory.hpp" +#include "ametsuchi/impl/rocksdb_storage_impl.hpp" +#include "ametsuchi/impl/storage_base.hpp" #include "ametsuchi/impl/storage_impl.hpp" #include "backend/protobuf/proto_block_json_converter.hpp" #include "backend/protobuf/proto_permission_to_string.hpp" #include "common/result.hpp" #include "generator/generator.hpp" #include "interfaces/iroha_internal/query_response_factory.hpp" -#include "logger/logger.hpp" #include "logger/logger_manager.hpp" #include "main/impl/pg_connection_init.hpp" +#include "main/subscription.hpp" #include "validators/always_valid_validator.hpp" #include "validators/protobuf/proto_block_validator.hpp" -using namespace iroha::ametsuchi; - -using namespace std::chrono_literals; +namespace ametsuchi = iroha::ametsuchi; using shared_model::interface::types::PublicKeyHexStringView; @@ -34,24 +37,30 @@ class StorageInitException : public std::runtime_error { }; namespace { - std::unique_ptr makeFlatFileBlockStorage( + std::unique_ptr makeFlatFileBlockStorage( std::string const &block_storage_dir, logger::LoggerManagerTreePtr log_manager) { - auto flat_file = FlatFile::create( + auto flat_file = ametsuchi::FlatFile::create( block_storage_dir, 
log_manager->getChild("FlatFile")->getLogger()); if (auto err = iroha::expected::resultToOptionalError(flat_file)) { throw StorageInitException{err.value()}; } - std::shared_ptr - block_converter = - std::make_shared(); - return std::make_unique( + return std::make_unique( std::move(flat_file.assumeValue()), - block_converter, + std::make_shared(), log_manager->getChild("FlatFileBlockStorage")->getLogger()); } - std::unique_ptr makePostgresBlockStorage( + std::unique_ptr makeRocksDbBlockStorage( + std::shared_ptr db_context, + logger::LoggerManagerTreePtr log_manager) { + return std::make_unique( + std::move(db_context), + std::make_shared(), + log_manager->getChild("RocksDbBlockStorage")->getLogger()); + } + + std::unique_ptr makePostgresBlockStorage( std::shared_ptr pool_wrapper, std::shared_ptr block_factory, logger::LoggerManagerTreePtr log_manager) { @@ -59,15 +68,17 @@ namespace { const std::string persistent_table("blocks"); if (auto err = iroha::expected::resultToOptionalError( - PostgresBlockStorageFactory::createTable(*sql, persistent_table))) { + ametsuchi::PostgresBlockStorageFactory::createTable( + *sql, persistent_table))) { throw StorageInitException{err.value()}; } - auto block_storage = PostgresBlockStorage::create(std::move(pool_wrapper), - block_factory, - persistent_table, - false, - log_manager->getLogger()); + auto block_storage = + ametsuchi::PostgresBlockStorage::create(std::move(pool_wrapper), + block_factory, + persistent_table, + false, + log_manager->getLogger()); if (auto err = iroha::expected::resultToOptionalError(block_storage)) { throw StorageInitException{err.value()}; } @@ -76,6 +87,46 @@ namespace { } } // namespace +iroha::expected::Result, std::string> +iroha::initStorage( + std::shared_ptr db_context, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + boost::optional block_storage_dir, + std::optional> + vm_caller_ref, + std::function)> + callback, + logger::LoggerManagerTreePtr log_manager) { 
+ auto perm_converter = + std::make_shared(); + + auto block_transport_factory = + std::make_shared( + std::make_unique>(), + std::make_unique()); + + std::unique_ptr + temporary_block_storage_factory = + std::make_unique(); + + auto persistent_block_storage = + makeRocksDbBlockStorage(db_context, log_manager); + + return ametsuchi::RocksDbStorageImpl::create( + std::move(db_context), + perm_converter, + std::move(pending_txs_storage), + std::move(query_response_factory), + std::move(temporary_block_storage_factory), + std::move(persistent_block_storage), + vm_caller_ref, + std::move(callback), + log_manager->getChild("Storage")); +} + iroha::expected::Result, std::string> iroha::initStorage( iroha::ametsuchi::PostgresOptions const &pg_opt, @@ -86,6 +137,8 @@ iroha::initStorage( boost::optional block_storage_dir, std::optional> vm_caller_ref, + std::function)> + callback, logger::LoggerManagerTreePtr log_manager) { try { auto perm_converter = @@ -99,26 +152,29 @@ iroha::initStorage( shared_model::interface::Block>>(), std::make_unique()); - std::unique_ptr temporary_block_storage_factory = - std::make_unique( - pool_wrapper, - block_transport_factory, - []() { return generator::randomString(20); }, - log_manager->getChild("TemporaryBlockStorage")->getLogger()); + std::unique_ptr + temporary_block_storage_factory = + std::make_unique( + pool_wrapper, + block_transport_factory, + []() { return generator::randomString(20); }, + log_manager->getChild("TemporaryBlockStorage")->getLogger()); auto persistent_block_storage = block_storage_dir ? 
makeFlatFileBlockStorage(block_storage_dir.value(), log_manager) : makePostgresBlockStorage( pool_wrapper, block_transport_factory, log_manager); - return StorageImpl::create(pg_opt, - pool_wrapper, - perm_converter, - std::move(pending_txs_storage), - std::move(query_response_factory), - std::move(temporary_block_storage_factory), - std::move(persistent_block_storage), - vm_caller_ref, - log_manager->getChild("Storage")); + return ametsuchi::StorageImpl::create( + pg_opt, + pool_wrapper, + perm_converter, + std::move(pending_txs_storage), + std::move(query_response_factory), + std::move(temporary_block_storage_factory), + std::move(persistent_block_storage), + vm_caller_ref, + std::move(callback), + log_manager->getChild("Storage")); } catch (StorageInitException const &e) { return iroha::expected::makeError( fmt::format("Storage initialization failed: ", e.what())); diff --git a/irohad/main/impl/storage_init.hpp b/irohad/main/impl/storage_init.hpp index e0e8ba40712..66c8a757b41 100644 --- a/irohad/main/impl/storage_init.hpp +++ b/irohad/main/impl/storage_init.hpp @@ -17,8 +17,9 @@ #include "logger/logger_manager_fwd.hpp" namespace shared_model::interface { + class Block; class QueryResponseFactory; -} +} // namespace shared_model::interface namespace iroha { class PendingTransactionStorage; @@ -26,10 +27,25 @@ namespace iroha { namespace ametsuchi { struct PoolWrapper; class PostgresOptions; + class RocksDbOptions; class Storage; class VmCaller; + struct RocksDBContext; } // namespace ametsuchi + expected::Result, std::string> + initStorage( + std::shared_ptr db_context, + std::shared_ptr pending_txs_storage, + std::shared_ptr + query_response_factory, + boost::optional block_storage_dir, + std::optional> + vm_caller_ref, + std::function)> + callback, + logger::LoggerManagerTreePtr log_manager); + expected::Result, std::string> initStorage( iroha::ametsuchi::PostgresOptions const &pg_opt, @@ -40,6 +56,8 @@ namespace iroha { boost::optional block_storage_dir, 
std::optional> vm_caller_ref, + std::function)> + callback, logger::LoggerManagerTreePtr log_manager); } // namespace iroha diff --git a/irohad/main/impl/subscription.cpp b/irohad/main/impl/subscription.cpp new file mode 100644 index 00000000000..925bbbbb7da --- /dev/null +++ b/irohad/main/impl/subscription.cpp @@ -0,0 +1,27 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "main/subscription.hpp" + +#include + +namespace iroha { + + std::shared_ptr getSubscription() { + static std::weak_ptr engine; + if (auto ptr = engine.lock()) + return ptr; + + static std::mutex engine_cs; + std::lock_guard lock(engine_cs); + if (auto ptr = engine.lock()) + return ptr; + + auto ptr = std::make_shared(getDispatcher()); + engine = ptr; + return ptr; + } + +} // namespace iroha diff --git a/irohad/main/impl/sync_dispatcher.cpp b/irohad/main/impl/sync_dispatcher.cpp new file mode 100644 index 00000000000..93f9f8215d6 --- /dev/null +++ b/irohad/main/impl/sync_dispatcher.cpp @@ -0,0 +1,18 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#include "main/subscription.hpp" + +#include "subscription/sync_dispatcher_impl.hpp" + +namespace iroha { + + std::shared_ptr getDispatcher() { + return std::make_shared< + subscription::SyncDispatcher>(); + } + +} // namespace iroha diff --git a/irohad/main/iroha_conf_literals.cpp b/irohad/main/iroha_conf_literals.cpp index 997d23eb9ce..b3b2350dcd7 100644 --- a/irohad/main/iroha_conf_literals.cpp +++ b/irohad/main/iroha_conf_literals.cpp @@ -26,8 +26,11 @@ namespace config_members { const char *Password = "password"; const char *WorkingDbName = "working database"; const char *MaintenanceDbName = "maintenance database"; + const char *DbPath = "path"; + const char *DbType = "type"; const char *MaxProposalSize = "max_proposal_size"; const char *ProposalDelay = "proposal_delay"; + const char *ProposalCreationTimeout = "proposal_creation_timeout"; const char *VoteDelay = "vote_delay"; const char *MstSupport = "mst_enable"; const char *MstExpirationTime = "mst_expiration_time"; @@ -55,4 +58,5 @@ namespace config_members { const char *kSigner = "signer"; const char *kCryptoProviderDefault = "default"; const char *PrivateKey = "private_key"; + const char *kMetrics = "metrics"; } // namespace config_members diff --git a/irohad/main/iroha_conf_literals.hpp b/irohad/main/iroha_conf_literals.hpp index 6c50511f363..1f0f211ed25 100644 --- a/irohad/main/iroha_conf_literals.hpp +++ b/irohad/main/iroha_conf_literals.hpp @@ -32,8 +32,11 @@ namespace config_members { extern const char *Password; extern const char *WorkingDbName; extern const char *MaintenanceDbName; + extern const char *DbPath; + extern const char *DbType; extern const char *MaxProposalSize; extern const char *ProposalDelay; + extern const char *ProposalCreationTimeout; extern const char *VoteDelay; extern const char *MstSupport; extern const char *MstExpirationTime; diff --git a/irohad/main/iroha_conf_loader.cpp b/irohad/main/iroha_conf_loader.cpp index 
85a82ad7e18..72db6fcb29c 100644 --- a/irohad/main/iroha_conf_loader.cpp +++ b/irohad/main/iroha_conf_loader.cpp @@ -162,8 +162,7 @@ class JsonDeserializerImpl { JsonDeserializerImpl getDictChild(std::string const &key) { return JsonDeserializerImpl{ common_objects_factory_, - env_path_ ? std::make_optional(makeEnvDictChildKey(key)) - : decltype(env_path_){}, + env_path_ ? std::make_optional(makeEnvDictChildKey(key)) : std::nullopt, json_ | [&](auto const &json) -> std::optional { assert_fatal(json_->get().IsObject(), "must be a JSON object."); auto const json_obj = json_->get().GetObject(); @@ -613,14 +612,21 @@ inline bool JsonDeserializerImpl::loadInto(IrohadConfig::InterPeerTls &dest) { template <> inline bool JsonDeserializerImpl::loadInto(IrohadConfig::DbConfig &dest) { - return getDictChild(config_members::Host).loadInto(dest.host) - and getDictChild(config_members::Port).loadInto(dest.port) - and getDictChild(config_members::User).loadInto(dest.user) - and getDictChild(config_members::Password).loadInto(dest.password) - and getDictChild(config_members::WorkingDbName) - .loadInto(dest.working_dbname) - and getDictChild(config_members::MaintenanceDbName) - .loadInto(dest.maintenance_dbname); + if (getDictChild(config_members::DbType).loadInto(dest.type)) { + if (dest.type == kDbTypeRocksdb) { + return getDictChild(config_members::DbPath).loadInto(dest.path); + } else if (dest.type == kDbTypePostgres) { + return getDictChild(config_members::Host).loadInto(dest.host) + and getDictChild(config_members::Port).loadInto(dest.port) + and getDictChild(config_members::User).loadInto(dest.user) + and getDictChild(config_members::Password).loadInto(dest.password) + and getDictChild(config_members::WorkingDbName) + .loadInto(dest.working_dbname) + and getDictChild(config_members::MaintenanceDbName) + .loadInto(dest.maintenance_dbname); + } + } + return false; } template <> @@ -684,6 +690,8 @@ inline bool JsonDeserializerImpl::loadInto(IrohadConfig &dest) { and 
(dest.database_config or getDictChild(PgOpt).loadInto(dest.pg_opt)) and getDictChild(MaxProposalSize).loadInto(dest.max_proposal_size) and getDictChild(ProposalDelay).loadInto(dest.proposal_delay) + and getDictChild(ProposalCreationTimeout) + .loadInto(dest.proposal_creation_timeout) and getDictChild(VoteDelay).loadInto(dest.vote_delay) and getDictChild(MstSupport).loadInto(dest.mst_support) and getDictChild(MstExpirationTime).loadInto(dest.mst_expiration_time) @@ -693,7 +701,8 @@ inline bool JsonDeserializerImpl::loadInto(IrohadConfig &dest) { and getDictChild(LogSection).loadInto(dest.logger_manager) and getDictChild(InitialPeers).loadInto(dest.initial_peers) and getDictChild(UtilityService).loadInto(dest.utility_service) - and getDictChild(kCrypto).loadInto(dest.crypto); + and getDictChild(kCrypto).loadInto(dest.crypto) + and (getDictChild("metrics").loadInto(dest.metrics_addr_port) or true); } // ------------ end of loadInto(path, dst, src) specializations ------------ diff --git a/irohad/main/iroha_conf_loader.hpp b/irohad/main/iroha_conf_loader.hpp index 8bfe3aa6c1e..655c151c586 100644 --- a/irohad/main/iroha_conf_loader.hpp +++ b/irohad/main/iroha_conf_loader.hpp @@ -17,8 +17,13 @@ #include "multihash/type.hpp" #include "torii/tls_params.hpp" +static const std::string kDbTypeRocksdb = "rocksdb"; +static const std::string kDbTypePostgres = "postgres"; + struct IrohadConfig { struct DbConfig { + std::string type; + std::string path; std::string host; uint16_t port; std::string user; @@ -61,9 +66,10 @@ struct IrohadConfig { bool mst_support; boost::optional mst_expiration_time; boost::optional max_round_delay_ms; + boost::optional proposal_creation_timeout; boost::optional stale_stream_max_rounds; boost::optional logger_manager; - boost::optional initial_peers; + std::optional initial_peers; boost::optional utility_service; // This is a part of cryto providers feature: @@ -86,6 +92,8 @@ struct IrohadConfig { }; boost::optional crypto; + + std::string 
metrics_addr_port; }; /** diff --git a/irohad/main/irohad.cpp b/irohad/main/irohad.cpp index 8694653ecd0..f2881058fda 100644 --- a/irohad/main/irohad.cpp +++ b/irohad/main/irohad.cpp @@ -3,13 +3,15 @@ * SPDX-License-Identifier: Apache-2.0 */ +#include +#include + #include #include #include +#include #include -#include -#include #include "ametsuchi/storage.hpp" #include "backend/protobuf/common_objects/proto_common_objects_factory.hpp" #include "common/bind.hpp" @@ -24,9 +26,11 @@ #include "logger/logger_manager.hpp" #include "main/application.hpp" #include "main/impl/pg_connection_init.hpp" +#include "main/impl/rocksdb_connection_init.hpp" #include "main/iroha_conf_literals.hpp" #include "main/iroha_conf_loader.hpp" #include "main/raw_block_loader.hpp" +#include "maintenance/metrics.hpp" #include "network/impl/channel_factory.hpp" #include "util/status_notifier.hpp" #include "util/utility_service.hpp" @@ -39,9 +43,6 @@ static const std::string kListenIp = "0.0.0.0"; static const std::string kLogSettingsFromConfigFile = "config_file"; -static const uint32_t kMstExpirationTimeDefault = 1440; -static const uint32_t kMaxRoundsDelayDefault = 3000; -static const uint32_t kStaleStreamMaxRoundsDefault = 2; static const std::string kDefaultWorkingDatabaseName{"iroha_default"}; static const std::chrono::milliseconds kExitCheckPeriod{1000}; @@ -65,7 +66,27 @@ DEFINE_string(keypair_name, "", "Specify name of .pub and .priv files"); */ DEFINE_bool(overwrite_ledger, false, "Overwrite ledger data if existing"); -DEFINE_bool(reuse_state, false, "Try to reuse existing state data at startup."); +/** + * Startup option to reuse existing WSV. Ignored since state is reused by + * default. + */ +DEFINE_bool(reuse_state, + true, + "Try to reuse existing state data at startup (Deprecated, startup " + "reuses state by default. Use drop_state to drop the WSV)."); + +/** + * Startup option to drop existing WSV. Cannot be used with 'reuse_state'. 
+ */ +DEFINE_bool(drop_state, false, "Drops existing state data at startup."); + +/** + * Startup option for WSV synchronization mode. + */ +DEFINE_bool(wait_for_new_blocks, + false, + "Startup synchronization policy - waits for new blocks in " + "blockstore, does not run network"); static bool validateVerbosity(const char *flagname, const std::string &val) { if (val == kLogSettingsFromConfigFile) { @@ -88,6 +109,14 @@ static bool validateVerbosity(const char *flagname, const std::string &val) { DEFINE_string(verbosity, kLogSettingsFromConfigFile, "Log verbosity"); DEFINE_validator(verbosity, &validateVerbosity); +/// Metrics. ToDo validator +DEFINE_string(metrics_addr, + "127.0.0.1", + "Prometeus HTTP server listen address"); +DEFINE_string(metrics_port, + "", + "Prometeus HTTP server listens port, disabled by default"); + std::sig_atomic_t caught_signal = 0; std::promise exit_requested; @@ -184,8 +213,8 @@ int main(int argc, char *argv[]) { // Parsing command line arguments gflags::ParseCommandLineFlags(&argc, &argv, true); - logger::LoggerManagerTreePtr log_manager; - logger::LoggerPtr log; + logger::LoggerManagerTreePtr log_manager = getDefaultLogManager(); + logger::LoggerPtr log = log_manager->getChild("Init")->getLogger(); try { // If the global log level override was set in the command line arguments, @@ -197,13 +226,8 @@ int main(int argc, char *argv[]) { log = log_manager->getChild("Init")->getLogger(); } - // Reading iroha configuration file - std::optional maybe_log; - if (log) { - maybe_log = log; - } auto config_result = - parse_iroha_config(FLAGS_config, getCommonObjectsFactory(), maybe_log); + parse_iroha_config(FLAGS_config, getCommonObjectsFactory(), {log}); if (auto e = iroha::expected::resultToOptionalError(config_result)) { if (log) { log->error("Failed reading the configuration: {}", e.value()); @@ -212,7 +236,7 @@ int main(int argc, char *argv[]) { } auto config = std::move(config_result).assumeValue(); - if (not log_manager) { + if 
(FLAGS_verbosity == kLogSettingsFromConfigFile) { log_manager = config.logger_manager.value_or(getDefaultLogManager()); log = log_manager->getChild("Init")->getLogger(); } @@ -223,8 +247,7 @@ int main(int argc, char *argv[]) { log->critical( "Got an empty initial peers list in configuration file. You have to " "either specify some peers or avoid overriding the peers from " - "genesis " - "block!"); + "genesis block!"); return EXIT_FAILURE; } @@ -240,23 +263,34 @@ int main(int argc, char *argv[]) { daemon_status_notifier->notify( ::iroha::utility_service::Status::kInitialization); - BOOST_ASSERT_MSG(not FLAGS_keypair_name.empty() or config.crypto, - "keypair must be specified somewhere"); - - auto keypair = FLAGS_keypair_name.empty() - ? getKeypairFromConfig(config.crypto.value()) - : getKeypairFromFile(FLAGS_keypair_name, log_manager); + boost::optional keypair = boost::none; + if (!FLAGS_keypair_name.empty()) { + keypair = getKeypairFromFile(FLAGS_keypair_name, log_manager); + } else if (config.crypto.has_value()) { + keypair = getKeypairFromConfig(config.crypto.value()); + } std::unique_ptr pg_opt; + std::unique_ptr rdb_opt; if (config.database_config) { - pg_opt = std::make_unique( - config.database_config->host, - config.database_config->port, - config.database_config->user, - config.database_config->password, - config.database_config->working_dbname, - config.database_config->maintenance_dbname, - log); + if (config.database_config->type == kDbTypeRocksdb) + rdb_opt = std::make_unique( + config.database_config->path); + else if (config.database_config->type == kDbTypePostgres) + pg_opt = std::make_unique( + config.database_config->host, + config.database_config->port, + config.database_config->user, + config.database_config->password, + config.database_config->working_dbname, + config.database_config->maintenance_dbname, + log); + else { + log->critical("Unsupported database type!"); + daemon_status_notifier->notify( + 
::iroha::utility_service::Status::kFailed); + return EXIT_FAILURE; + } } else if (config.pg_opt) { log->warn("Using deprecated database connection string!"); pg_opt = std::make_unique( @@ -269,29 +303,21 @@ int main(int argc, char *argv[]) { // Configuring iroha daemon auto irohad = std::make_unique( - config.block_store_path, + config, std::move(pg_opt), + std::move(rdb_opt), kListenIp, // TODO(mboldyrev) 17/10/2018: add a parameter in // config file and/or command-line arguments? - config.torii_port, - config.internal_port, - config.max_proposal_size, - std::chrono::milliseconds(config.proposal_delay), - std::chrono::milliseconds(config.vote_delay), - std::chrono::minutes( - config.mst_expiration_time.value_or(kMstExpirationTimeDefault)), std::move(keypair), - std::chrono::milliseconds( - config.max_round_delay_ms.value_or(kMaxRoundsDelayDefault)), - config.stale_stream_max_rounds.value_or(kStaleStreamMaxRoundsDefault), - std::move(config.initial_peers), log_manager->getChild("Irohad"), - FLAGS_reuse_state ? iroha::StartupWsvDataPolicy::kReuse - : iroha::StartupWsvDataPolicy::kDrop, - ::iroha::network::getDefaultChannelParams(), + FLAGS_drop_state ? iroha::StartupWsvDataPolicy::kDrop + : iroha::StartupWsvDataPolicy::kReuse, + FLAGS_wait_for_new_blocks + ? 
iroha::StartupWsvSynchronizationPolicy::kWaitForNewBlocks + : iroha::StartupWsvSynchronizationPolicy::kSyncUpAndGo, + std::nullopt, boost::make_optional(config.mst_support, iroha::GossipPropagationStrategyParams{}), - config.torii_tls_params, boost::none); // Check if iroha daemon storage was successfully initialized @@ -355,10 +381,18 @@ int main(int argc, char *argv[]) { // clear previous storage if any irohad->dropStorage(); + // Check if iroha daemon storage was successfully re-initialized + if (not irohad->storage) { + // Abort execution if not + log->error("Failed to re-initialize storage"); + daemon_status_notifier->notify( + ::iroha::utility_service::Status::kFailed); + return EXIT_FAILURE; + } const auto txs_num = block->transactions().size(); - if (auto e = iroha::expected::resultToOptionalError( - irohad->storage->insertBlock(std::move(block)))) { + auto inserted = irohad->storage->insertBlock(std::move(block)); + if (auto e = iroha::expected::resultToOptionalError(inserted)) { log->critical("Could not apply genesis block: {}", e.value()); return EXIT_FAILURE; } @@ -372,19 +406,17 @@ int main(int argc, char *argv[]) { "genesis block is provided. Please pecify new genesis block using " "--genesis_block parameter."); return EXIT_FAILURE; - } else { - if (overwrite) { - // no genesis, blockstore present, overwrite specified -> new block - // store, world state should be reset - irohad->resetWsv(); - if (not FLAGS_reuse_state) { - log->warn( - "No new genesis block is specified - blockstore will not be " - "overwritten. If you want overwrite ledger state, please " - "specify new genesis block using --genesis_block parameter. 
" - "If you want to reuse existing state data (WSV), consider the " - "--reuse_state flag."); - } + } else if (overwrite) { + // no genesis, blockstore present, overwrite specified -> new block + // store, world state should be reset + irohad->resetWsv(); + if (not FLAGS_reuse_state) { + log->warn( + "No new genesis block is specified - blockstore will not be " + "overwritten. If you want overwrite ledger state, please " + "specify new genesis block using --genesis_block parameter. " + "If you want to reuse existing state data (WSV), consider the " + "--reuse_state flag."); } } } @@ -427,6 +459,29 @@ int main(int argc, char *argv[]) { std::signal(SIGQUIT, handler); #endif + // start metrics + std::shared_ptr metrics; // Must be a pointer because 'this' is + // captured to lambdas in constructor. + std::string metrics_addr; + if (FLAGS_metrics_port.size()) { + metrics_addr = FLAGS_metrics_addr + ":" + FLAGS_metrics_port; + } else if (config.metrics_addr_port.size()) { + metrics_addr = config.metrics_addr_port; + } + if (metrics_addr.empty()) { + log->info("Skiping Metrics initialization."); + } else { + try { + metrics = + Metrics::create(metrics_addr, + irohad->storage, + log_manager->getChild("Metrics")->getLogger()); + log->info("Metrics listens on {}", metrics->getListenAddress()); + } catch (std::exception const &ex) { + log->warn("Failed to initialize Metrics: {}", ex.what()); + } + } + // runs iroha log->info("Running iroha"); auto run_result = irohad->run(); diff --git a/irohad/main/server_runner.cpp b/irohad/main/server_runner.cpp index dc15f309df2..8efd8c6e118 100644 --- a/irohad/main/server_runner.cpp +++ b/irohad/main/server_runner.cpp @@ -69,10 +69,6 @@ iroha::expected::Result ServerRunner::run() { builder.RegisterService(service.get()); } - // in order to bypass built-it limitation of gRPC message size - builder.SetMaxReceiveMessageSize(INT_MAX); - builder.SetMaxSendMessageSize(INT_MAX); - // enable retry policy 
builder.AddChannelArgument(GRPC_ARG_ENABLE_RETRIES, 1); diff --git a/irohad/main/startup_params.hpp b/irohad/main/startup_params.hpp index b6476a3850c..7c5b86e4629 100644 --- a/irohad/main/startup_params.hpp +++ b/irohad/main/startup_params.hpp @@ -7,11 +7,27 @@ #define IROHA_STARTUP_PARAMS_HPP namespace iroha { - /// Policy regarging possible existing WSV data at startup + /** + * Policy regarging possible existing WSV data at startup + */ enum class StartupWsvDataPolicy { - kReuse, //!< try to reuse existing data in the + kReuse, //!< try to reuse existing data in the WSV kDrop, //!< drop any existing state data }; + + enum class StorageType { + kPostgres, + kRocksDb, + }; + + /** + * Startup synchronization policy + */ + enum class StartupWsvSynchronizationPolicy { + kSyncUpAndGo, //!< sync up and continue execution + kWaitForNewBlocks, //!< enter endless loop to wait for new blocks added + //!< externally + }; } // namespace iroha #endif diff --git a/irohad/main/subscription.hpp b/irohad/main/subscription.hpp new file mode 100644 index 00000000000..d1644271511 --- /dev/null +++ b/irohad/main/subscription.hpp @@ -0,0 +1,43 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_SUBSCRIPTION_HPP +#define IROHA_SUBSCRIPTION_HPP + +#include + +#include "common/common.hpp" +#include "main/subscription_fwd.hpp" +#include "subscription/subscriber_impl.hpp" +#include "subscription/subscription_manager.hpp" + +namespace iroha { + std::shared_ptr getDispatcher(); + std::shared_ptr getSubscription(); + + template + struct SubscriberCreator { + template + static auto create(SubscriptionEngineHandlers tid, + F &&callback, + Args &&... 
args) { + auto subscriber = BaseSubscriber::create( + getSubscription()->getEngine(), + std::forward(args)...); + subscriber->setCallback( + [f{std::forward(callback)}](auto /*set_id*/, + auto &object, + auto event_key, + EventData args) mutable { + assert(key == event_key); + std::forward(f)(object, std::move(args)); + }); + subscriber->subscribe(0, key, tid); + return subscriber; + } + }; +} // namespace iroha + +#endif // IROHA_SUBSCRIPTION_HPP diff --git a/irohad/main/subscription_fwd.hpp b/irohad/main/subscription_fwd.hpp new file mode 100644 index 00000000000..8d5e371bc89 --- /dev/null +++ b/irohad/main/subscription_fwd.hpp @@ -0,0 +1,83 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_SUBSCRIPTION_FWD_HPP +#define IROHA_SUBSCRIPTION_FWD_HPP + +#include + +namespace iroha { + enum SubscriptionEngineHandlers { + kYac = 0, + kRequestProposal, + kVoteProcess, + kMetrics, + //--------------- + kTotalCount + }; + + enum EventTypes { + kOnOutcome = 0, + kOnSynchronization, + kOnInitialSynchronization, + kOnCurrentRoundPeers, + kOnRoundSwitch, + kOnProposal, + kOnVerifiedProposal, + kOnProcessedHashes, + kOnOutcomeFromYac, + kOnOutcomeDelayed, + kOnBlock, + kOnInitialBlock, + kOnBlockCreatorEvent, + kOnFinalizedTxs, + kOnApplyState, + kOnNeedProposal, + kOnNewProposal, + kOnNewBatchInCache, + kOnPackProposal, + kOnProposalResponse, + kOnTransactionResponse, + kOnConsensusGateEvent, + + // MST + kOnStateUpdate, + kOnPreparedBatches, + kOnExpiredBatches, + + // YAC + kTimer, + kOnState, + + // TEST + kOnTestOperationComplete + }; + + static constexpr uint32_t kThreadPoolSize = 3u; + + namespace subscription { + struct IDispatcher; + + template + class SubscriptionManager; + + template + class SubscriberImpl; + } // namespace subscription + + using Dispatcher = subscription::IDispatcher; + using Subscription = + subscription::SubscriptionManager; + template + using BaseSubscriber = 
subscription:: + SubscriberImpl; + +} // namespace iroha + +#endif // IROHA_SUBSCRIPTION_FWD_HPP diff --git a/irohad/maintenance/CMakeLists.txt b/irohad/maintenance/CMakeLists.txt new file mode 100644 index 00000000000..b8b860ed8fb --- /dev/null +++ b/irohad/maintenance/CMakeLists.txt @@ -0,0 +1,10 @@ +# +# Copyright Soramitsu Co., Ltd. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +# + +add_library(maintenance metrics.cpp) +target_link_libraries(maintenance + prometheus-cpp::core prometheus-cpp::pull + async_subscription +) diff --git a/irohad/maintenance/metrics.cpp b/irohad/maintenance/metrics.cpp new file mode 100644 index 00000000000..ab950081210 --- /dev/null +++ b/irohad/maintenance/metrics.cpp @@ -0,0 +1,138 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "maintenance/metrics.hpp" + +#include +#include +#include + +#include +#include + +#include "CivetServer.h" // for CivetCallbacks +#include "interfaces/commands/add_peer.hpp" +#include "interfaces/commands/command.hpp" +#include "interfaces/commands/create_domain.hpp" +#include "interfaces/commands/remove_peer.hpp" +#include "interfaces/iroha_internal/block.hpp" +#include "interfaces/transaction.hpp" +#include "logger/logger.hpp" +#include "main/subscription.hpp" + +using namespace iroha; +using namespace prometheus; + +Metrics::Metrics(std::string const &listen_addr, + std::shared_ptr storage, + logger::LoggerPtr const &logger) + : storage_(storage), logger_(logger) { + static const std::regex full_matcher( + "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-" + "9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):[0-9]+$"); + static const std::regex port_matcher("^:?([0-9]{1,5})$"); + if (std::regex_match(listen_addr, full_matcher)) { + listen_addr_port_ = listen_addr; + } else if (std::regex_match(listen_addr, port_matcher)) { + listen_addr_port_ = "127.0.0.1"; + if (listen_addr[0] != ':') + listen_addr_port_ += 
":"; + listen_addr_port_ += listen_addr; + } else { + throw std::runtime_error("Metrics does not accept listen address '" + + listen_addr + "'"); + } + + // @note it's the users responsibility to keep the object alive + registry_ = std::make_shared(); + + CivetCallbacks cvcbs; + auto civet_no_log = [](const struct mg_connection *conn, + const char *message) { return 1; }; + cvcbs.log_message = civet_no_log; + cvcbs.log_access = civet_no_log; + + // create an http server running on addr:port + exposer_ = std::make_shared(listen_addr_port_, + /*num_threads*/ 2, + &cvcbs); + + // ask the exposer_ to scrape the registry_ on incoming HTTP requests + exposer_->RegisterCollectable(registry_, "/metrics"); + + auto &block_height_gauge = BuildGauge() + .Name("blocks_height") + .Help("Total number of blocks in chain") + .Register(*registry_); + auto &block_height = block_height_gauge.Add({}); + block_height.Set(storage_->getBlockQuery()->getTopBlockHeight()); + + auto &peers_number_gauge = + BuildGauge() + .Name("peers_number") + .Help("Total number peers to send transactions and request proposals") + .Register(*registry_); + auto &number_of_peers = peers_number_gauge.Add({}); + number_of_peers.Set(storage_->getWsvQuery()->getPeers()->size()); + + auto &domains_number_gauge = BuildGauge() + .Name("number_of_domains") + .Help("Total number of domains in WSV") + .Register(*registry_); + auto &domains_number = domains_number_gauge.Add({}); + domains_number.Set(storage_->getWsvQuery()->countDomains().assumeValue()); + + auto &total_number_of_transactions_gauge = + BuildGauge() + .Name("total_number_of_transactions") + .Help("Total number of transactions in blockchain") + .Register(*registry_); + auto &total_number_of_transactions = + total_number_of_transactions_gauge.Add({}); + total_number_of_transactions.Set( + storage_->getWsvQuery()->countTransactions().assumeValue()); + + auto &number_of_signatures_in_last_block_gauge = + BuildGauge() + 
.Name("number_of_signatures_in_last_block") + .Help("Number of signatures in last block") + .Register(*registry_); + auto &number_of_signatures_in_last_block = + number_of_signatures_in_last_block_gauge.Add({}); + auto ptopblock = + storage_->getBlockQuery() + ->getBlock(storage_->getBlockQuery()->getTopBlockHeight()) + .assumeValue(); + number_of_signatures_in_last_block.Set(boost::size(ptopblock->signatures())); + + block_subscriber_ = + SubscriberCreator::template create( + SubscriptionEngineHandlers::kMetrics, + [&, wregistry = std::weak_ptr(registry_)](auto &, + BlockPtr pblock) { + // Metrics values are stored inside and owned by registry, + // capture them by reference is legal. + std::shared_ptr registry{wregistry}; // throw if expired + assert(pblock); + block_height.Set(pblock->height()); + number_of_signatures_in_last_block.Set( + boost::size(pblock->signatures())); + total_number_of_transactions.Increment( + boost::size(pblock->transactions())); + logger_->info("total_number_of_transactions {}", + total_number_of_transactions.Value()); + int domains_diff = 0, peers_diff = 0; + using namespace shared_model::interface; + for (Transaction const &trx : pblock->transactions()) { + for (Command const &cmd : trx.commands()) { + domains_diff += cmd.is() ? 1 : 0; + peers_diff += cmd.is() ? 1 : 0; + peers_diff -= cmd.is() ? 1 : 0; + } + } + number_of_peers.Increment(peers_diff); + domains_number.Increment(domains_diff); + }); +} diff --git a/irohad/maintenance/metrics.hpp b/irohad/maintenance/metrics.hpp new file mode 100644 index 00000000000..c3cf39d5b2d --- /dev/null +++ b/irohad/maintenance/metrics.hpp @@ -0,0 +1,58 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_MAINTENANCE_METRICS_HPP +#define IROHA_MAINTENANCE_METRICS_HPP + +#include +#include + +#include +#include +#include +#include + +#include "ametsuchi/storage.hpp" +#include "ametsuchi/wsv_query.hpp" +#include "interfaces/common_objects/types.hpp" +#include "interfaces/iroha_internal/block.hpp" +#include "logger/logger_fwd.hpp" +#include "main/subscription.hpp" +#include "network/ordering_gate_common.hpp" + +class Metrics : public std::enable_shared_from_this { + using OnProposalSubscription = iroha::BaseSubscriber< + bool, + iroha::network::OrderingEvent>; // FixMe subscribtion ≠ subscriber + using BlockPtr = std::shared_ptr; + using BlockSubscriber = iroha::BaseSubscriber; + + std::string listen_addr_port_; + std::shared_ptr exposer_; + std::shared_ptr registry_; + std::shared_ptr storage_; + std::shared_ptr block_subscriber_; + std::shared_ptr on_proposal_subscription_; + logger::LoggerPtr logger_; + + Metrics(std::string const &listen_addr, + std::shared_ptr storage, + logger::LoggerPtr const &logger); + + public: + std::string const &getListenAddress() const { + return listen_addr_port_; + } + + template + static std::shared_ptr create(Ts &&... args) { + struct Resolver : Metrics { + Resolver(Ts &&... args) : Metrics(std::forward(args)...) {} + }; + return std::make_shared(std::forward(args)...); + } +}; + +#endif // IROHA_MAINTENANCE_METRICS_HPP diff --git a/irohad/model/CMakeLists.txt b/irohad/model/CMakeLists.txt index 497c686627a..d54a117d62f 100644 --- a/irohad/model/CMakeLists.txt +++ b/irohad/model/CMakeLists.txt @@ -20,7 +20,6 @@ add_library(model target_link_libraries(model common - rxcpp ) add_library(model_crypto_provider diff --git a/irohad/model/commit.hpp b/irohad/model/commit.hpp deleted file mode 100644 index 274253124b8..00000000000 --- a/irohad/model/commit.hpp +++ /dev/null @@ -1,15 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_COMMIT_HPP -#define IROHA_COMMIT_HPP - -#include - -namespace iroha { - using OldCommit = rxcpp::observable; -} // namespace iroha - -#endif // IROHA_COMMIT_HPP diff --git a/irohad/multi_sig_transactions/state/impl/mst_state.cpp b/irohad/multi_sig_transactions/state/impl/mst_state.cpp index 93f5e9d22c2..7854a74c2b6 100644 --- a/irohad/multi_sig_transactions/state/impl/mst_state.cpp +++ b/irohad/multi_sig_transactions/state/impl/mst_state.cpp @@ -16,7 +16,6 @@ #include #include #include -#include "common/set.hpp" #include "interfaces/transaction.hpp" #include "logger/logger.hpp" diff --git a/irohad/multi_sig_transactions/storage/impl/mst_storage.cpp b/irohad/multi_sig_transactions/storage/impl/mst_storage.cpp index e7aa4c4ba51..8d0115ddda2 100644 --- a/irohad/multi_sig_transactions/storage/impl/mst_storage.cpp +++ b/irohad/multi_sig_transactions/storage/impl/mst_storage.cpp @@ -43,4 +43,10 @@ namespace iroha { bool MstStorage::batchInStorage(const DataType &batch) const { return batchInStorageImpl(batch); } + + void MstStorage::processFinalizedTransaction( + shared_model::interface::types::HashType const &hash) { + std::lock_guard lock{mutex_}; + processFinalizedTransactionImpl(hash); + } } // namespace iroha diff --git a/irohad/multi_sig_transactions/storage/impl/mst_storage_impl.cpp b/irohad/multi_sig_transactions/storage/impl/mst_storage_impl.cpp index 151e2412684..57e59df2b25 100644 --- a/irohad/multi_sig_transactions/storage/impl/mst_storage_impl.cpp +++ b/irohad/multi_sig_transactions/storage/impl/mst_storage_impl.cpp @@ -21,8 +21,7 @@ namespace iroha { return target_state_iter; } // -----------------------------| interface API |----------------------------- - MstStorageStateImpl::MstStorageStateImpl(MstStorageStateImpl::private_tag, - CompleterType const &completer, + MstStorageStateImpl::MstStorageStateImpl(CompleterType const &completer, logger::LoggerPtr mst_state_logger, logger::LoggerPtr log) 
: MstStorage(log), @@ -30,36 +29,6 @@ namespace iroha { own_state_(MstState::empty(mst_state_logger, completer_)), mst_state_logger_(std::move(mst_state_logger)) {} - std::shared_ptr MstStorageStateImpl::create( - CompleterType const &completer, - rxcpp::observable finalized_txs, - logger::LoggerPtr mst_state_logger, - logger::LoggerPtr log) { - auto storage = std::make_shared( - MstStorageStateImpl::private_tag{}, - completer, - std::move(mst_state_logger), - std::move(log)); - std::weak_ptr storage_(storage); - - auto subscription = rxcpp::composite_subscription(); - finalized_txs.subscribe( - subscription, - [storage_, - subscription](shared_model::interface::types::HashType const &hash) { - if (auto storage = storage_.lock()) { - for (auto &p : storage->peer_states_) { - p.second.eraseByTransactionHash(hash); - } - storage->own_state_.eraseByTransactionHash(hash); - } else { - subscription.unsubscribe(); - } - }); - - return storage; - } - auto MstStorageStateImpl::applyImpl( shared_model::interface::types::PublicKeyHexStringView target_peer_key, const MstState &new_state) @@ -102,4 +71,12 @@ namespace iroha { return own_state_.contains(batch); } + void MstStorageStateImpl::processFinalizedTransactionImpl( + shared_model::interface::types::HashType const &hash) { + for (auto &p : peer_states_) { + p.second.eraseByTransactionHash(hash); + } + own_state_.eraseByTransactionHash(hash); + } + } // namespace iroha diff --git a/irohad/multi_sig_transactions/storage/mst_storage.hpp b/irohad/multi_sig_transactions/storage/mst_storage.hpp index 8a05959e431..5ac0cb42b96 100644 --- a/irohad/multi_sig_transactions/storage/mst_storage.hpp +++ b/irohad/multi_sig_transactions/storage/mst_storage.hpp @@ -75,6 +75,9 @@ namespace iroha { */ bool batchInStorage(const DataType &batch) const; + void processFinalizedTransaction( + shared_model::interface::types::HashType const &hash); + virtual ~MstStorage() = default; protected: @@ -107,6 +110,9 @@ namespace iroha { virtual bool 
batchInStorageImpl(const DataType &batch) const = 0; + virtual void processFinalizedTransactionImpl( + shared_model::interface::types::HashType const &hash) = 0; + // -------------------------------| fields |-------------------------------- mutable std::mutex mutex_; diff --git a/irohad/multi_sig_transactions/storage/mst_storage_impl.hpp b/irohad/multi_sig_transactions/storage/mst_storage_impl.hpp index 35c8f9f6217..bf5352dded7 100644 --- a/irohad/multi_sig_transactions/storage/mst_storage_impl.hpp +++ b/irohad/multi_sig_transactions/storage/mst_storage_impl.hpp @@ -9,7 +9,6 @@ #include #include -#include #include "logger/logger_fwd.hpp" #include "multi_sig_transactions/hash.hpp" #include "multi_sig_transactions/storage/mst_storage.hpp" @@ -17,8 +16,6 @@ namespace iroha { class MstStorageStateImpl : public MstStorage { private: - struct private_tag {}; - // -----------------------------| private API |----------------------------- /** @@ -32,21 +29,13 @@ namespace iroha { public: // ----------------------------| interface API |---------------------------- - MstStorageStateImpl(MstStorageStateImpl::private_tag, - CompleterType const &completer, + MstStorageStateImpl(CompleterType const &completer, logger::LoggerPtr mst_state_logger, logger::LoggerPtr log); MstStorageStateImpl(MstStorageStateImpl const &) = delete; MstStorageStateImpl &operator=(MstStorageStateImpl const &) = delete; - static std::shared_ptr create( - CompleterType const &completer, - rxcpp::observable - finalized_txs, - logger::LoggerPtr mst_state_logger, - logger::LoggerPtr log); - auto applyImpl( shared_model::interface::types::PublicKeyHexStringView target_peer_key, const MstState &new_state) @@ -68,6 +57,9 @@ namespace iroha { bool batchInStorageImpl(const DataType &batch) const override; + void processFinalizedTransactionImpl( + shared_model::interface::types::HashType const &hash) override; + private: // ---------------------------| private fields |---------------------------- diff --git 
a/irohad/multi_sig_transactions/transport/impl/mst_transport_grpc.cpp b/irohad/multi_sig_transactions/transport/impl/mst_transport_grpc.cpp index fde4deae264..27093998ffe 100644 --- a/irohad/multi_sig_transactions/transport/impl/mst_transport_grpc.cpp +++ b/irohad/multi_sig_transactions/transport/impl/mst_transport_grpc.cpp @@ -3,12 +3,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include "common/default_constructible_unary_fn.hpp" // non-copyable value workaround - #include "multi_sig_transactions/transport/mst_transport_grpc.hpp" -#include -#include #include #include #include "ametsuchi/tx_presence_cache.hpp" diff --git a/irohad/network/CMakeLists.txt b/irohad/network/CMakeLists.txt index 8e6cf58a860..3145678aeaa 100644 --- a/irohad/network/CMakeLists.txt +++ b/irohad/network/CMakeLists.txt @@ -6,7 +6,6 @@ add_library(networking ) target_link_libraries(networking - rxcpp shared_model_interfaces synchronizer logger @@ -19,7 +18,6 @@ add_library(block_loader target_link_libraries(block_loader grpc_generic_client_factory loader_grpc - rxcpp shared_model_interfaces shared_model_proto_backend schema diff --git a/irohad/network/block_loader.hpp b/irohad/network/block_loader.hpp index 83c5f96c11e..bc38119633e 100644 --- a/irohad/network/block_loader.hpp +++ b/irohad/network/block_loader.hpp @@ -7,13 +7,29 @@ #define IROHA_BLOCK_LOADER_HPP #include -#include +#include #include "interfaces/common_objects/types.hpp" #include "interfaces/iroha_internal/block.hpp" namespace iroha { namespace network { + class BlockReader { + public: + /** + * Try to read the next block. 
Returns iteration_complete when the + * iteration is completed, std::string when an error occurred + */ + struct iteration_complete {}; + virtual std::variant< + iteration_complete, + std::shared_ptr, + std::string> + read() = 0; + + virtual ~BlockReader() = default; + }; + /** * Interface for downloading blocks from a network */ @@ -25,12 +41,10 @@ namespace iroha { * @param peer_pubkey - peer for requesting blocks * @return */ - virtual iroha::expected::Result< - rxcpp::observable>, - std::string> - retrieveBlocks(const shared_model::interface::types::HeightType height, - shared_model::interface::types::PublicKeyHexStringView - peer_pubkey) = 0; + virtual expected::Result> retrieveBlocks( + const shared_model::interface::types::HeightType height, + shared_model::interface::types::PublicKeyHexStringView + peer_pubkey) = 0; /** * Retrieve block by its block_height from given peer diff --git a/irohad/network/consensus_gate.hpp b/irohad/network/consensus_gate.hpp index f7f61e3dc0e..0a2a1a1a873 100644 --- a/irohad/network/consensus_gate.hpp +++ b/irohad/network/consensus_gate.hpp @@ -6,16 +6,6 @@ #ifndef IROHA_CONSENSUS_GATE_HPP #define IROHA_CONSENSUS_GATE_HPP -#include -#include "consensus/gate_object.hpp" - -namespace shared_model { - namespace interface { - class Block; - class Proposal; - } // namespace interface -} // namespace shared_model - namespace iroha { namespace simulator { @@ -29,20 +19,11 @@ namespace iroha { */ class ConsensusGate { public: - using Round = consensus::Round; - /** * Vote for given block creator event in consensus */ virtual void vote(const simulator::BlockCreatorEvent &event) = 0; - using GateObject = consensus::GateObject; - - /** - * @return emit gate responses - */ - virtual rxcpp::observable onOutcome() = 0; - /// Prevent any new outgoing network activity. Be passive. 
virtual void stop() = 0; diff --git a/irohad/network/impl/async_grpc_client.hpp b/irohad/network/impl/async_grpc_client.hpp index 2dd9ebe299f..d8405ee549c 100644 --- a/irohad/network/impl/async_grpc_client.hpp +++ b/irohad/network/impl/async_grpc_client.hpp @@ -37,7 +37,9 @@ namespace iroha { while (cq_.Next(&got_tag, &ok)) { auto call = static_cast(got_tag); if (not call->status.ok()) { - log_->warn("RPC failed: {}", call->status.error_message()); + log_->warn("RPC failed: {} {}", + call->context.peer(), + call->status.error_message()); } if (call->on_response) { call->on_response(call->status, call->reply); diff --git a/irohad/network/impl/block_loader_impl.cpp b/irohad/network/impl/block_loader_impl.cpp index 6262429bbd2..c984e8d066a 100644 --- a/irohad/network/impl/block_loader_impl.cpp +++ b/irohad/network/impl/block_loader_impl.cpp @@ -10,7 +10,6 @@ #include #include -#include #include "backend/protobuf/block.hpp" #include "builders/protobuf/transport_builder.hpp" #include "common/bind.hpp" @@ -25,6 +24,59 @@ using namespace iroha::network; using namespace shared_model::crypto; using namespace shared_model::interface; +namespace { + class BlockReaderImpl : public BlockReader { + public: + BlockReaderImpl( + std::weak_ptr block_factory, + std::unique_ptr client, + proto::BlockRequest request) + : block_factory_(std::move(block_factory)), + client_(std::move(client)), + reader_(client_->retrieveBlocks(&context_, std::move(request))) { + context_.set_deadline(std::chrono::system_clock::now() + + std::chrono::minutes(1ull)); + } + + std::variant, + std::string> + read() override { + iroha::protocol::Block proto_block; + auto maybe_block_factory = block_factory_.lock(); + if (not maybe_block_factory) { + return fmt::format("Failed to lock block factory"); + } + + if (not reader_->Read(&proto_block)) { + auto status = reader_->Finish(); + if (not status.ok()) { + return fmt::format("Failed to read block: {}", + status.error_message()); + } + return 
iteration_complete{}; + } + + auto maybe_block = + maybe_block_factory->createBlock(std::move(proto_block)); + if (hasError(maybe_block)) { + context_.TryCancel(); + return fmt::format("Failed to parse received block: {}", + std::move(maybe_block).assumeError()); + } + + return std::move(maybe_block).assumeValue(); + } + + private: + std::weak_ptr block_factory_; + grpc::ClientContext context_; + std::unique_ptr client_; + std::unique_ptr> + reader_; + }; +} // namespace + BlockLoaderImpl::BlockLoaderImpl( std::shared_ptr peer_query_factory, std::shared_ptr factory, @@ -35,44 +87,26 @@ BlockLoaderImpl::BlockLoaderImpl( client_factory_(std::move(client_factory)), log_(std::move(log)) {} -Result>, std::string> -BlockLoaderImpl::retrieveBlocks( +Result> BlockLoaderImpl::retrieveBlocks( const shared_model::interface::types::HeightType height, types::PublicKeyHexStringView peer_pubkey) { - return findPeer(peer_pubkey) | [&](const auto &peer) { - return client_factory_->createClient(*peer) | [&](auto client) { - std::shared_ptr shared_client( - std::move(client)); - return rxcpp::observable>( - rxcpp::observable<>::create>( - [height, shared_client, block_factory = block_factory_]( - auto subscriber) { - grpc::ClientContext context; - proto::BlockRequest request; - request.set_height(height - + 1); // request next block to our top - auto reader = shared_client->retrieveBlocks(&context, request); - protocol::Block block; - while (subscriber.is_subscribed() and reader->Read(&block)) { - block_factory->createBlock(std::move(block)) - .match( - [&](auto &&result) { - subscriber.on_next(std::move(result.value)); - }, - [&](const auto &error) { - context.TryCancel(); - reader->Finish(); - subscriber.on_error(std::make_exception_ptr( - std::runtime_error(fmt::format( - "Failed to parse received block: {}.", - error.error)))); - }); - } - reader->Finish(); - subscriber.on_completed(); - })); - }; - }; + auto maybe_peer = findPeer(peer_pubkey); + if (hasError(maybe_peer)) { + 
return maybe_peer.assumeError(); + } + + auto maybe_client = client_factory_->createClient(*maybe_peer.assumeValue()); + if (hasError(maybe_client)) { + return maybe_client.assumeError(); + } + + proto::BlockRequest request; + request.set_height(height + 1); // request next block to our top + + return std::make_unique( + block_factory_, + std::move(maybe_client).assumeValue(), + std::move(request)); } Result, std::string> BlockLoaderImpl::retrieveBlock( diff --git a/irohad/network/impl/block_loader_impl.hpp b/irohad/network/impl/block_loader_impl.hpp index 55fdb2c9191..795460ceb93 100644 --- a/irohad/network/impl/block_loader_impl.hpp +++ b/irohad/network/impl/block_loader_impl.hpp @@ -33,12 +33,10 @@ namespace iroha { logger::LoggerPtr log, std::unique_ptr client_factory); - iroha::expected::Result< - rxcpp::observable>, - std::string> - retrieveBlocks(const shared_model::interface::types::HeightType height, - shared_model::interface::types::PublicKeyHexStringView - peer_pubkey) override; + expected::Result> retrieveBlocks( + const shared_model::interface::types::HeightType height, + shared_model::interface::types::PublicKeyHexStringView peer_pubkey) + override; iroha::expected::Result, std::string> diff --git a/irohad/network/impl/channel_factory.cpp b/irohad/network/impl/channel_factory.cpp index 9bfdbb0eb04..ae2e01b321b 100644 --- a/irohad/network/impl/channel_factory.cpp +++ b/irohad/network/impl/channel_factory.cpp @@ -15,7 +15,6 @@ using namespace iroha::expected; using namespace iroha::network; -using namespace std::literals::chrono_literals; using iroha::operator|; @@ -23,24 +22,6 @@ std::string makeJsonString(const std::string &val) { return fmt::format("\"{}\"", val); } -std::unique_ptr iroha::network::getDefaultChannelParams() { - static const auto retry_policy = [] { - GrpcChannelParams::RetryPolicy retry_policy; - retry_policy.max_attempts = 5u; - retry_policy.initial_backoff = 5s; - retry_policy.max_backoff = 120s; - retry_policy.backoff_multiplier = 
1.6f; - retry_policy.retryable_status_codes = { - "UNKNOWN", "DEADLINE_EXCEEDED", "ABORTED", "INTERNAL", "UNAVAILABLE"}; - return retry_policy; - }(); - auto params = std::make_unique(); - params->max_request_message_bytes = std::numeric_limits::max(); - params->max_response_message_bytes = std::numeric_limits::max(); - params->retry_policy = retry_policy; - return params; -} - grpc::ChannelArguments iroha::network::detail::makeInterPeerChannelArguments( const std::set &services, const GrpcChannelParams ¶ms) { return detail::makeChannelArguments(services, params); @@ -102,34 +83,43 @@ grpc::ChannelArguments iroha::network::detail::makeChannelArguments( std::shared_ptr iroha::network::createInsecureChannel( const shared_model::interface::types::AddressType &address, const std::string &service_full_name, - const GrpcChannelParams ¶ms) { - return grpc::CreateCustomChannel( - address, - grpc::InsecureChannelCredentials(), - detail::makeInterPeerChannelArguments({service_full_name}, params)); + std::optional> + maybe_params) { + if (not maybe_params) + return grpc::CreateChannel(address, grpc::InsecureChannelCredentials()); + + return grpc::CreateCustomChannel(address, + grpc::InsecureChannelCredentials(), + detail::makeInterPeerChannelArguments( + {service_full_name}, *maybe_params)); } class ChannelFactory::ChannelArgumentsProvider { public: - ChannelArgumentsProvider(std::shared_ptr params) - : params_(std::move(params)) {} + ChannelArgumentsProvider( + std::optional> maybe_params) + : maybe_params_(std::move(maybe_params)) {} const grpc::ChannelArguments &get(const std::string &service_full_name) { - if (service_names_.count(service_full_name) == 0) { + if (maybe_params_ and service_names_.count(service_full_name) == 0) { service_names_.emplace(service_full_name); - args_ = detail::makeInterPeerChannelArguments(service_names_, *params_); + args_ = detail::makeInterPeerChannelArguments(service_names_, + *maybe_params_.value()); } return args_; } private: - 
std::shared_ptr params_; + std::optional> maybe_params_; std::set service_names_; grpc::ChannelArguments args_; }; -ChannelFactory::ChannelFactory(std::shared_ptr params) - : args_(std::make_unique(std::move(params))) {} +ChannelFactory::ChannelFactory( + std::optional> maybe_params) + : args_( + std::make_unique(std::move(maybe_params))) { +} ChannelFactory::~ChannelFactory() = default; diff --git a/irohad/network/impl/channel_factory.hpp b/irohad/network/impl/channel_factory.hpp index aa8fac2eafa..c6b5381f263 100644 --- a/irohad/network/impl/channel_factory.hpp +++ b/irohad/network/impl/channel_factory.hpp @@ -30,12 +30,6 @@ namespace iroha { const GrpcChannelParams ¶ms); } // namespace detail - /** - * Creates client params which enable sending and receiving messages of - * INT_MAX bytes size with retries (see implementation for details). - */ - std::unique_ptr getDefaultChannelParams(); - /** * Creates channel arguments for inter-peer communication. * @tparam Service type for gRPC stub, e.g. proto::Yac @@ -53,15 +47,16 @@ namespace iroha { * Creates a channel * @tparam Service type for gRPC stub, e.g. proto::Yac * @param address ip address and port for connection, ipv4:port - * @param params grpc channel params + * @param maybe_params grpc channel params * @return grpc channel with provided params */ template std::shared_ptr createInsecureChannel( const shared_model::interface::types::AddressType &address, - const GrpcChannelParams ¶ms) { + std::optional> + maybe_params) { return createInsecureChannel( - address, Service::service_full_name(), params); + address, Service::service_full_name(), maybe_params); } /** @@ -69,24 +64,26 @@ namespace iroha { * @param address ip address and port to connect to, ipv4:port * @param service_full_name gRPC service full name, * e.g. 
iroha.consensus.yac.proto.Yac - * @param params grpc channel params + * @param maybe_params grpc channel params * @return grpc channel with provided params */ std::shared_ptr createInsecureChannel( const shared_model::interface::types::AddressType &address, const std::string &service_full_name, - const GrpcChannelParams ¶ms); + std::optional> + maybe_params); /** * Creates client * @tparam Service type for gRPC stub, e.g. proto::Yac * @param address ip address and port for connection, ipv4:port - * @param params grpc channel params + * @param maybe_params grpc channel params * @return gRPC stub of parametrized type */ template std::unique_ptr createInsecureClient( - const std::string &address, const GrpcChannelParams ¶ms) { + const std::string &address, + std::optional> params) { return Service::NewStub(createInsecureChannel(address, params)); } @@ -100,15 +97,19 @@ namespace iroha { */ template std::unique_ptr createInsecureClient( - const std::string &ip, size_t port, const GrpcChannelParams ¶ms) { + const std::string &ip, + size_t port, + std::optional> + maybe_params) { return createInsecureClient(ip + ":" + std::to_string(port), - params); + maybe_params); } class ChannelFactory : public ChannelProvider { public: /// @param params grpc channel params - ChannelFactory(std::shared_ptr params); + ChannelFactory( + std::optional> maybe_params); ~ChannelFactory() override; diff --git a/irohad/network/impl/channel_factory_tls.cpp b/irohad/network/impl/channel_factory_tls.cpp index 916ce6fbc78..dea3035816c 100644 --- a/irohad/network/impl/channel_factory_tls.cpp +++ b/irohad/network/impl/channel_factory_tls.cpp @@ -14,11 +14,11 @@ using namespace iroha::expected; using namespace iroha::network; ChannelFactoryTls::ChannelFactoryTls( - std::shared_ptr params, - boost::optional> + std::optional> maybe_params, + std::optional> peer_cert_provider, - boost::optional> my_creds) - : ChannelFactory(std::move(params)), + std::optional> my_creds) + : 
ChannelFactory(std::move(maybe_params)), peer_cert_provider_(std::move(peer_cert_provider)), my_creds_(std::move(my_creds)) {} diff --git a/irohad/network/impl/channel_factory_tls.hpp b/irohad/network/impl/channel_factory_tls.hpp index ab480c98dae..c0bcb15ebf5 100644 --- a/irohad/network/impl/channel_factory_tls.hpp +++ b/irohad/network/impl/channel_factory_tls.hpp @@ -19,10 +19,10 @@ namespace iroha { class ChannelFactoryTls : public ChannelFactory { public: ChannelFactoryTls( - std::shared_ptr params, - boost::optional> + std::optional> maybe_params, + std::optional> peer_cert_provider, - boost::optional> my_creds); + std::optional> my_creds); protected: iroha::expected::Result, @@ -31,9 +31,9 @@ namespace iroha { const shared_model::interface::Peer &peer) const override; private: - boost::optional> + std::optional> peer_cert_provider_; - boost::optional> my_creds_; + std::optional> my_creds_; }; } // namespace network diff --git a/irohad/network/impl/grpc_channel_params.hpp b/irohad/network/impl/grpc_channel_params.hpp index f1825f7b8fd..def5ad42acf 100644 --- a/irohad/network/impl/grpc_channel_params.hpp +++ b/irohad/network/impl/grpc_channel_params.hpp @@ -8,8 +8,7 @@ #include #include - -#include +#include namespace iroha { namespace network { @@ -24,7 +23,7 @@ namespace iroha { }; unsigned int max_request_message_bytes; unsigned int max_response_message_bytes; - boost::optional retry_policy; + std::optional retry_policy; }; } // namespace network diff --git a/irohad/network/impl/peer_communication_service_impl.cpp b/irohad/network/impl/peer_communication_service_impl.cpp index 5071a4597fd..a695f2185ec 100644 --- a/irohad/network/impl/peer_communication_service_impl.cpp +++ b/irohad/network/impl/peer_communication_service_impl.cpp @@ -5,25 +5,15 @@ #include "network/impl/peer_communication_service_impl.hpp" -#include #include "interfaces/iroha_internal/transaction_batch.hpp" #include "logger/logger.hpp" #include "network/ordering_gate.hpp" -#include 
"simulator/verified_proposal_creator.hpp" -#include "synchronizer/synchronizer.hpp" namespace iroha { namespace network { PeerCommunicationServiceImpl::PeerCommunicationServiceImpl( - std::shared_ptr ordering_gate, - std::shared_ptr synchronizer, - std::shared_ptr - proposal_creator, - logger::LoggerPtr log) - : ordering_gate_(std::move(ordering_gate)), - synchronizer_(std::move(synchronizer)), - proposal_creator_(std::move(proposal_creator)), - log_{std::move(log)} {} + std::shared_ptr ordering_gate, logger::LoggerPtr log) + : ordering_gate_(std::move(ordering_gate)), log_{std::move(log)} {} void PeerCommunicationServiceImpl::propagate_batch( std::shared_ptr batch) @@ -31,20 +21,5 @@ namespace iroha { log_->info("propagate batch"); ordering_gate_->propagateBatch(batch); } - - rxcpp::observable PeerCommunicationServiceImpl::onProposal() - const { - return ordering_gate_->onProposal(); - } - - rxcpp::observable - PeerCommunicationServiceImpl::onVerifiedProposal() const { - return proposal_creator_->onVerifiedProposal(); - } - - rxcpp::observable - PeerCommunicationServiceImpl::onSynchronization() const { - return synchronizer_->on_commit_chain(); - } } // namespace network } // namespace iroha diff --git a/irohad/network/impl/peer_communication_service_impl.hpp b/irohad/network/impl/peer_communication_service_impl.hpp index 32d89330449..f508ecae4f4 100644 --- a/irohad/network/impl/peer_communication_service_impl.hpp +++ b/irohad/network/impl/peer_communication_service_impl.hpp @@ -11,41 +11,20 @@ #include "logger/logger_fwd.hpp" namespace iroha { - namespace simulator { - class VerifiedProposalCreator; - } // namespace simulator - - namespace synchronizer { - class Synchronizer; - } // namespace synchronizer - namespace network { class OrderingGate; class PeerCommunicationServiceImpl : public PeerCommunicationService { public: - PeerCommunicationServiceImpl( - std::shared_ptr ordering_gate, - std::shared_ptr synchronizer, - std::shared_ptr proposal_creator, - 
logger::LoggerPtr log); + PeerCommunicationServiceImpl(std::shared_ptr ordering_gate, + logger::LoggerPtr log); void propagate_batch( std::shared_ptr batch) const override; - rxcpp::observable onProposal() const override; - - rxcpp::observable - onVerifiedProposal() const override; - - rxcpp::observable onSynchronization() - const override; - private: std::shared_ptr ordering_gate_; - std::shared_ptr synchronizer_; - std::shared_ptr proposal_creator_; logger::LoggerPtr log_; }; } // namespace network diff --git a/irohad/network/ordering_gate.hpp b/irohad/network/ordering_gate.hpp index 10c5cd4e892..82f05ef22a6 100644 --- a/irohad/network/ordering_gate.hpp +++ b/irohad/network/ordering_gate.hpp @@ -8,7 +8,6 @@ #include -#include #include "network/ordering_gate_common.hpp" #include "network/peer_communication_service.hpp" @@ -34,12 +33,6 @@ namespace iroha { virtual void propagateBatch( std::shared_ptr batch) = 0; - /** - * Return observable of all proposals in the consensus - * @return observable with notifications - */ - virtual rxcpp::observable onProposal() = 0; - virtual ~OrderingGate() = default; /// Prevent any new outgoing network activity. Be passive. 
diff --git a/irohad/network/ordering_gate_common.hpp b/irohad/network/ordering_gate_common.hpp index 7086a4fa96e..48f5bc4000d 100644 --- a/irohad/network/ordering_gate_common.hpp +++ b/irohad/network/ordering_gate_common.hpp @@ -7,8 +7,8 @@ #define IROHA_ORDERING_GATE_COMMON_HPP #include +#include -#include #include "ametsuchi/ledger_state.hpp" #include "consensus/round.hpp" @@ -25,7 +25,7 @@ namespace iroha { * Event, which is emitted by ordering gate, when it requests a proposal */ struct OrderingEvent { - boost::optional> + std::optional> proposal; consensus::Round round; std::shared_ptr ledger_state; diff --git a/irohad/network/peer_communication_service.hpp b/irohad/network/peer_communication_service.hpp index 287fc435991..09945fe9cc4 100644 --- a/irohad/network/peer_communication_service.hpp +++ b/irohad/network/peer_communication_service.hpp @@ -6,10 +6,7 @@ #ifndef IROHA_PEER_COMMUNICATION_SERVICE_HPP #define IROHA_PEER_COMMUNICATION_SERVICE_HPP -#include #include "network/ordering_gate_common.hpp" -#include "simulator/verified_proposal_creator_common.hpp" -#include "synchronizer/synchronizer_common.hpp" namespace shared_model { namespace interface { @@ -34,31 +31,6 @@ namespace iroha { std::shared_ptr batch) const = 0; - /** - * Event is triggered when proposal arrives from network. - * @return observable with Proposals. - * (List of Proposals) - */ - virtual rxcpp::observable onProposal() const = 0; - - /** - * Event is triggered when verified proposal arrives - * @return verified proposal and list of stateful validation errors - */ - virtual rxcpp::observable - onVerifiedProposal() const = 0; - - /** - * Event is triggered when commit block arrives. - * @return observable with sequence of committed blocks. - * In common case observable will contain one element. - * But there are scenarios when consensus provide many blocks, e.g. - * on peer startup - peer will get all actual blocks. 
- * Also, it can provide no blocks at all, if commit was empty - */ - virtual rxcpp::observable - onSynchronization() const = 0; - virtual ~PeerCommunicationService() = default; }; diff --git a/irohad/ordering/CMakeLists.txt b/irohad/ordering/CMakeLists.txt index 10ebf3fde94..2f3ec6cdbdd 100644 --- a/irohad/ordering/CMakeLists.txt +++ b/irohad/ordering/CMakeLists.txt @@ -11,12 +11,10 @@ target_link_libraries(on_demand_common add_library(on_demand_ordering_service impl/on_demand_ordering_service_impl.cpp - impl/kick_out_proposal_creation_strategy.cpp ) target_link_libraries(on_demand_ordering_service on_demand_common - TBB::tbb mst_hash mst_state shared_model_interfaces @@ -47,20 +45,16 @@ target_link_libraries(on_demand_connection_manager on_demand_common shared_model_interfaces consensus_round - rxcpp Boost::boost logger ) add_library(on_demand_ordering_gate impl/on_demand_ordering_gate.cpp - impl/ordering_gate_cache/ordering_gate_cache.cpp - impl/ordering_gate_cache/on_demand_cache.cpp ) target_link_libraries(on_demand_ordering_gate on_demand_common consensus_round - rxcpp Boost::boost logger common diff --git a/irohad/ordering/impl/kick_out_proposal_creation_strategy.cpp b/irohad/ordering/impl/kick_out_proposal_creation_strategy.cpp deleted file mode 100644 index 8389edca0e9..00000000000 --- a/irohad/ordering/impl/kick_out_proposal_creation_strategy.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -#include "ordering/impl/kick_out_proposal_creation_strategy.hpp" - -#include - -using namespace iroha::ordering; - -KickOutProposalCreationStrategy::KickOutProposalCreationStrategy( - std::shared_ptr tolerance_checker) - : tolerance_checker_(std::move(tolerance_checker)) {} - -void KickOutProposalCreationStrategy::onCollaborationOutcome( - RoundType round, size_t peers_in_round) { - std::lock_guard guard(mutex_); - peers_in_round_ = peers_in_round; - - auto it = requested_count_.upper_bound(round); - requested_count_.erase(requested_count_.begin(), it); -} - -bool KickOutProposalCreationStrategy::shouldCreateRound(RoundType round) { - std::lock_guard guard(mutex_); - return not tolerance_checker_->isTolerated(requested_count_[round], - peers_in_round_); -} - -boost::optional -KickOutProposalCreationStrategy::onProposalRequest(RoundType requested_round) { - { - std::lock_guard guard(mutex_); - requested_count_[requested_round]++; - } - - return boost::none; -} diff --git a/irohad/ordering/impl/kick_out_proposal_creation_strategy.hpp b/irohad/ordering/impl/kick_out_proposal_creation_strategy.hpp deleted file mode 100644 index 044512fefe4..00000000000 --- a/irohad/ordering/impl/kick_out_proposal_creation_strategy.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_KICK_OUT_PROPOSAL_CREATION_STRATEGY_HPP -#define IROHA_KICK_OUT_PROPOSAL_CREATION_STRATEGY_HPP - -#include "ordering/ordering_service_proposal_creation_strategy.hpp" - -#include -#include -#include - -#include "consensus/yac/supermajority_checker.hpp" - -namespace iroha { - namespace ordering { - - /** - * Creation strategy based on supermajority checker tolerance condition - */ - class KickOutProposalCreationStrategy : public ProposalCreationStrategy { - public: - using SupermajorityCheckerType = - iroha::consensus::yac::SupermajorityChecker; - KickOutProposalCreationStrategy( - std::shared_ptr tolerance_checker); - - void onCollaborationOutcome(RoundType round, - size_t peers_in_round) override; - - bool shouldCreateRound(RoundType round) override; - - boost::optional onProposalRequest( - RoundType requested_round) override; - - private: - using RoundCollectionType = std::map; - - std::mutex mutex_; - std::shared_ptr tolerance_checker_; - size_t peers_in_round_; - RoundCollectionType requested_count_; - }; - } // namespace ordering -} // namespace iroha - -#endif // IROHA_KICK_OUT_PROPOSAL_CREATION_STRATEGY_HPP diff --git a/irohad/ordering/impl/on_demand_common.cpp b/irohad/ordering/impl/on_demand_common.cpp index ba31014a09d..03b2cc09b81 100644 --- a/irohad/ordering/impl/on_demand_common.cpp +++ b/irohad/ordering/impl/on_demand_common.cpp @@ -10,17 +10,6 @@ namespace iroha { const consensus::RejectRoundType kFirstRejectRound = 0; - consensus::RejectRoundType currentRejectRoundConsumer( - consensus::RejectRoundType round) { - return round + 2; - } - - const consensus::RejectRoundType kNextRejectRoundConsumer = - kFirstRejectRound + 1; - - const consensus::RejectRoundType kNextCommitRoundConsumer = - kFirstRejectRound; - consensus::Round nextCommitRound(const consensus::Round &round) { return {round.block_round + 1, kFirstRejectRound}; } diff --git a/irohad/ordering/impl/on_demand_common.hpp 
b/irohad/ordering/impl/on_demand_common.hpp index 8b1b43df5d6..4445e362107 100644 --- a/irohad/ordering/impl/on_demand_common.hpp +++ b/irohad/ordering/impl/on_demand_common.hpp @@ -6,24 +6,30 @@ #ifndef IROHA_ON_DEMAND_COMMON_HPP #define IROHA_ON_DEMAND_COMMON_HPP +#include +#include + #include "consensus/round.hpp" +namespace shared_model::interface { + class Proposal; +} + namespace iroha { namespace ordering { extern const consensus::RejectRoundType kFirstRejectRound; - consensus::RejectRoundType currentRejectRoundConsumer( - consensus::RejectRoundType round); - - extern const consensus::RejectRoundType kNextRejectRoundConsumer; - - extern const consensus::RejectRoundType kNextCommitRoundConsumer; - consensus::Round nextCommitRound(const consensus::Round &round); consensus::Round nextRejectRound(const consensus::Round &round); + struct ProposalEvent { + std::optional> + proposal; + consensus::Round round; + }; + } // namespace ordering } // namespace iroha diff --git a/irohad/ordering/impl/on_demand_connection_manager.cpp b/irohad/ordering/impl/on_demand_connection_manager.cpp index 2c3400a52b8..2f8f74ebe1c 100644 --- a/irohad/ordering/impl/on_demand_connection_manager.cpp +++ b/irohad/ordering/impl/on_demand_connection_manager.cpp @@ -5,8 +5,6 @@ #include "ordering/impl/on_demand_connection_manager.hpp" -#include -#include "common/bind.hpp" #include "common/result.hpp" #include "interfaces/iroha_internal/proposal.hpp" #include "logger/logger.hpp" @@ -17,75 +15,59 @@ using namespace iroha::ordering; OnDemandConnectionManager::OnDemandConnectionManager( std::shared_ptr factory, - rxcpp::observable peers, logger::LoggerPtr log) - : log_(std::move(log)), - factory_(std::move(factory)), - subscription_(peers.subscribe([this](const auto &peers) { - // `this' is captured raw and needs protection during destruction of - // OnDemandConnectionManager. 
We assert that - // OnDemandConnectionManager::initializeConnections locks the mutex and - // does not use `this' if stop_requested_ reads `true'. - this->initializeConnections(peers); - })) {} + : log_(std::move(log)), factory_(std::move(factory)) {} OnDemandConnectionManager::OnDemandConnectionManager( std::shared_ptr factory, - rxcpp::observable peers, CurrentPeers initial_peers, logger::LoggerPtr log) - : OnDemandConnectionManager(std::move(factory), peers, std::move(log)) { - // using start_with(initial_peers) results in deadlock + : OnDemandConnectionManager(std::move(factory), std::move(log)) { initializeConnections(initial_peers); } OnDemandConnectionManager::~OnDemandConnectionManager() { - subscription_.unsubscribe(); stop_requested_.store(true); std::lock_guard lock(mutex_); } void OnDemandConnectionManager::onBatches(CollectionType batches) { /* - * Transactions are always sent to the round after the next round (+2) - * There are 4 possibilities - all combinations of commits and rejects in the - * following two rounds. This can be visualised as a diagram, where: o - - * current round, x - next round, v - target round + * Transactions are sent to the current and next rounds (+1) + * There are 3 possibilities. This can be visualised as a diagram, + * where: o - current round, x - next round * - * 0 1 2 0 1 2 0 1 2 0 1 2 - * 0 o x v 0 o . . 0 o x . 0 o . . - * 1 . . . 1 x v . 1 v . . 1 x . . - * 2 . . . 2 . . . 2 . . . 2 v . . - * RejectReject CommitReject RejectCommit CommitCommit + * 0 1 0 1 0 1 + * 0 o . 0 o x 0 o . + * 1 . . 1 . . 1 x . 
+ * Issuer Reject Commit */ auto propagate = [&](auto consumer) { std::shared_lock lock(mutex_); if (not stop_requested_.load(std::memory_order_relaxed)) { - connections_.peers[consumer] | [&batches](const auto &connection) { - connection->onBatches(batches); + if (auto &connection = connections_.peers[consumer]) { + (*connection)->onBatches(batches); }; } }; - propagate(kRejectRejectConsumer); - propagate(kRejectCommitConsumer); - propagate(kCommitRejectConsumer); - propagate(kCommitCommitConsumer); + propagate(kIssuer); + propagate(kRejectConsumer); + propagate(kCommitConsumer); } -boost::optional> -OnDemandConnectionManager::onRequestProposal(consensus::Round round) { +void OnDemandConnectionManager::onRequestProposal(consensus::Round round) { std::shared_lock lock(mutex_); if (stop_requested_.load(std::memory_order_relaxed)) { - return boost::none; + return; } log_->debug("onRequestProposal, {}", round); - return connections_.peers[kIssuer] | [&round](const auto &connection) { - return connection->onRequestProposal(round); - }; + if (auto &connection = connections_.peers[kIssuer]) { + (*connection)->onRequestProposal(round); + } } void OnDemandConnectionManager::initializeConnections( @@ -95,11 +77,15 @@ void OnDemandConnectionManager::initializeConnections( // Object was destroyed and `this' is no longer valid. 
return; } - auto create_assign = [this](auto &connection, auto &peer) { connection = expected::resultToOptionalValue(factory_->create(*peer)); + auto create_assign = [&](auto target) { + auto maybe_connection = factory_->create(*peers.peers[target]); + if (expected::hasError(maybe_connection)) { + connections_.peers[target] = std::nullopt; + return; + } + connections_.peers[target] = std::move(maybe_connection).assumeValue(); }; - for (auto &&pair : boost::combine(connections_.peers, peers.peers)) { - create_assign(boost::get<0>(pair), boost::get<1>(pair)); - } + create_assign(kIssuer); + create_assign(kRejectConsumer); + create_assign(kCommitConsumer); } diff --git a/irohad/ordering/impl/on_demand_connection_manager.hpp b/irohad/ordering/impl/on_demand_connection_manager.hpp index c8c59432171..ab5ae54f724 100644 --- a/irohad/ordering/impl/on_demand_connection_manager.hpp +++ b/irohad/ordering/impl/on_demand_connection_manager.hpp @@ -8,10 +8,10 @@ #include "ordering/on_demand_os_transport.hpp" +#include #include #include -#include #include "logger/logger_fwd.hpp" namespace iroha { @@ -25,18 +25,10 @@ namespace iroha { /** * Responsibilities of individual peers from the peers array * Transactions are sent to three ordering services: - * reject round for current block, reject round for next block, and - * commit for subsequent next round + * current round (issuer), reject round, and commit round * Proposal is requested from the current ordering service: issuer */ - enum PeerType { - kRejectRejectConsumer = 0, - kRejectCommitConsumer, - kCommitRejectConsumer, - kCommitCommitConsumer, - kIssuer, - kCount - }; + enum PeerType { kRejectConsumer = 0, kCommitConsumer, kIssuer, kCount }; /// Collection with value types which represent peers template OnDemandConnectionManager( std::shared_ptr factory, - rxcpp::observable peers, logger::LoggerPtr log); OnDemandConnectionManager( std::shared_ptr factory, - rxcpp::observable peers, CurrentPeers 
initial_peers, logger::LoggerPtr log); @@ -66,8 +56,13 @@ namespace iroha { void onBatches(CollectionType batches) override; - boost::optional> onRequestProposal( - consensus::Round round) override; + void onRequestProposal(consensus::Round round) override; + + /** + * Initialize corresponding peers in connections_ using factory_ + * @param peers to initialize connections with + */ + void initializeConnections(const CurrentPeers &peers); private: /** @@ -76,19 +71,12 @@ namespace iroha { */ struct CurrentConnections { PeerCollectionType< - boost::optional>> + std::optional>> peers; }; - /** - * Initialize corresponding peers in connections_ using factory_ - * @param peers to initialize connections with - */ - void initializeConnections(const CurrentPeers &peers); - logger::LoggerPtr log_; std::shared_ptr factory_; - rxcpp::composite_subscription subscription_; CurrentConnections connections_; diff --git a/irohad/ordering/impl/on_demand_ordering_gate.cpp b/irohad/ordering/impl/on_demand_ordering_gate.cpp index d6c1708a7ab..132dffe5e29 100644 --- a/irohad/ordering/impl/on_demand_ordering_gate.cpp +++ b/irohad/ordering/impl/on_demand_ordering_gate.cpp @@ -15,67 +15,26 @@ #include "ametsuchi/tx_presence_cache_utils.hpp" #include "common/visitor.hpp" #include "interfaces/iroha_internal/transaction_batch.hpp" +#include "interfaces/iroha_internal/transaction_batch_impl.hpp" #include "interfaces/iroha_internal/transaction_batch_parser_impl.hpp" #include "logger/logger.hpp" #include "ordering/impl/on_demand_common.hpp" -using namespace iroha; -using namespace iroha::ordering; +using iroha::ordering::OnDemandOrderingGate; OnDemandOrderingGate::OnDemandOrderingGate( std::shared_ptr ordering_service, - std::unique_ptr network_client, - rxcpp::observable< - std::shared_ptr> - processed_tx_hashes, - rxcpp::observable round_switch_events, + std::shared_ptr network_client, std::shared_ptr factory, std::shared_ptr tx_cache, - std::shared_ptr proposal_creation_strategy, size_t 
transaction_limit, logger::LoggerPtr log) : log_(std::move(log)), transaction_limit_(transaction_limit), ordering_service_(std::move(ordering_service)), network_client_(std::move(network_client)), - processed_tx_hashes_subscription_( - processed_tx_hashes.subscribe([this](auto hashes) { - // remove transaction hashes from cache - log_->debug("Asking to remove {} transactions from cache.", - hashes->size()); - ordering_service_->onTxsCommitted(*hashes); - })), - round_switch_subscription_(round_switch_events.subscribe( - [this, - proposal_creation_strategy = - std::move(proposal_creation_strategy)](auto event) { - log_->debug("Current: {}", event.next_round); - - std::shared_lock stop_lock(stop_mutex_); - if (stop_requested_) { - log_->warn("Not doing anything because stop was requested."); - return; - } - - // notify our ordering service about new round - proposal_creation_strategy->onCollaborationOutcome( - event.next_round, event.ledger_state->ledger_peers.size()); - ordering_service_->onCollaborationOutcome(event.next_round); - - this->sendCachedTransactions(); - - // request proposal for the current round - auto proposal = this->processProposalRequest( - network_client_->onRequestProposal(event.next_round)); - // vote for the object received from the network - proposal_notifier_.get_subscriber().on_next( - network::OrderingEvent{std::move(proposal), - event.next_round, - std::move(event.ledger_state)}); - })), proposal_factory_(std::move(factory)), - tx_cache_(std::move(tx_cache)), - proposal_notifier_(proposal_notifier_lifetime_) {} + tx_cache_(std::move(tx_cache)) {} OnDemandOrderingGate::~OnDemandOrderingGate() { stop(); @@ -96,8 +55,24 @@ void OnDemandOrderingGate::propagateBatch( transport::OdOsNotification::CollectionType{batch}); } -rxcpp::observable OnDemandOrderingGate::onProposal() { - return proposal_notifier_.get_observable(); +void OnDemandOrderingGate::processRoundSwitch(RoundSwitch const &event) { + log_->debug("Current: {}", event.next_round); + 
current_round_ = event.next_round; + current_ledger_state_ = event.ledger_state; + + std::shared_lock stop_lock(stop_mutex_); + if (stop_requested_) { + log_->warn("Not doing anything because stop was requested."); + return; + } + + // notify our ordering service about new round + ordering_service_->onCollaborationOutcome(event.next_round); + + this->sendCachedTransactions(); + + // request proposal for the current round + network_client_->onRequestProposal(event.next_round); } void OnDemandOrderingGate::stop() { @@ -105,28 +80,41 @@ void OnDemandOrderingGate::stop() { if (not stop_requested_) { stop_requested_ = true; log_->info("Stopping."); - proposal_notifier_lifetime_.unsubscribe(); - processed_tx_hashes_subscription_.unsubscribe(); - round_switch_subscription_.unsubscribe(); network_client_.reset(); } } -boost::optional> -OnDemandOrderingGate::processProposalRequest( - boost::optional< - std::shared_ptr> proposal) - const { - if (not proposal) { - return boost::none; +std::optional +OnDemandOrderingGate::processProposalRequest(ProposalEvent const &event) const { + if (not current_ledger_state_ || event.round != current_round_) { + return std::nullopt; + } + if (not event.proposal) { + return network::OrderingEvent{ + std::nullopt, event.round, current_ledger_state_}; } - auto proposal_without_replays = - removeReplaysAndDuplicates(*std::move(proposal)); + auto result = removeReplaysAndDuplicates(*event.proposal); // no need to check empty proposal - if (boost::empty(proposal_without_replays->transactions())) { - return boost::none; + if (boost::empty(result->transactions())) { + return network::OrderingEvent{ + std::nullopt, event.round, current_ledger_state_}; + } + shared_model::interface::types::SharedTxsCollectionType transactions; + for (auto &transaction : result->transactions()) { + transactions.push_back(clone(transaction)); + } + auto batch_txs = + shared_model::interface::TransactionBatchParserImpl().parseBatches( + transactions); + 
shared_model::interface::types::BatchesCollectionType batches; + for (auto &txs : batch_txs) { + batches.push_back( + std::make_shared( + std::move(txs))); } - return proposal_without_replays; + ordering_service_->processReceivedProposal(batches); + return network::OrderingEvent{ + std::move(result), event.round, current_ledger_state_}; } void OnDemandOrderingGate::sendCachedTransactions() { @@ -161,8 +149,11 @@ OnDemandOrderingGate::removeReplaysAndDuplicates( // TODO andrei 30.11.18 IR-51 Handle database error return false; } - // TODO nickaleks 21.11.18: IR-1887 log replayed transactions - return !ametsuchi::isAlreadyProcessed(*tx_result); + auto is_processed = ametsuchi::isAlreadyProcessed(*tx_result); + if (is_processed) + log_->warn("Duplicate transaction: {}", + iroha::ametsuchi::getHash(*tx_result).hex()); + return !is_processed; }; std::unordered_set hashes; diff --git a/irohad/ordering/impl/on_demand_ordering_gate.hpp b/irohad/ordering/impl/on_demand_ordering_gate.hpp index 218874c9803..bdc27cc9d69 100644 --- a/irohad/ordering/impl/on_demand_ordering_gate.hpp +++ b/irohad/ordering/impl/on_demand_ordering_gate.hpp @@ -10,15 +10,14 @@ #include -#include -#include #include "interfaces/common_objects/types.hpp" #include "interfaces/iroha_internal/proposal.hpp" #include "interfaces/iroha_internal/unsafe_proposal_factory.hpp" #include "logger/logger_fwd.hpp" -#include "ordering/impl/ordering_gate_cache/ordering_gate_cache.hpp" +#include "ordering/impl/on_demand_common.hpp" +#include "ordering/impl/round_switch.hpp" #include "ordering/on_demand_ordering_service.hpp" -#include "ordering/ordering_service_proposal_creation_strategy.hpp" +#include "ordering/on_demand_os_transport.hpp" namespace iroha { namespace ametsuchi { @@ -33,27 +32,12 @@ namespace iroha { */ class OnDemandOrderingGate : public network::OrderingGate { public: - struct RoundSwitch { - consensus::Round next_round; - std::shared_ptr ledger_state; - - RoundSwitch(consensus::Round next_round, - 
std::shared_ptr ledger_state) - : next_round(std::move(next_round)), - ledger_state(std::move(ledger_state)) {} - }; - OnDemandOrderingGate( std::shared_ptr ordering_service, - std::unique_ptr network_client, - rxcpp::observable< - std::shared_ptr> - processed_tx_hashes, - rxcpp::observable round_switch_events, + std::shared_ptr network_client, std::shared_ptr factory, std::shared_ptr tx_cache, - std::shared_ptr proposal_creation_strategy, size_t transaction_limit, logger::LoggerPtr log); @@ -63,20 +47,17 @@ namespace iroha { std::shared_ptr batch) override; - rxcpp::observable onProposal() override; + void processRoundSwitch(RoundSwitch const &event); - void stop() override; - - private: /** * Handle an incoming proposal from ordering service */ - boost::optional> - processProposalRequest( - boost::optional< - std::shared_ptr> - proposal) const; + std::optional processProposalRequest( + ProposalEvent const &event) const; + void stop() override; + + private: void sendCachedTransactions(); /** @@ -92,18 +73,15 @@ namespace iroha { /// max number of transactions passed to one ordering service size_t transaction_limit_; std::shared_ptr ordering_service_; - std::unique_ptr network_client_; - rxcpp::composite_subscription processed_tx_hashes_subscription_; - rxcpp::composite_subscription round_switch_subscription_; + std::shared_ptr network_client_; std::shared_ptr proposal_factory_; std::shared_ptr tx_cache_; + consensus::Round current_round_; + std::shared_ptr current_ledger_state_; std::shared_timed_mutex stop_mutex_; bool stop_requested_{false}; - - rxcpp::composite_subscription proposal_notifier_lifetime_; - rxcpp::subjects::subject proposal_notifier_; }; } // namespace ordering diff --git a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp index a22f5a1f2e8..a21d6cf0f4f 100644 --- a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp +++ 
b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp @@ -7,11 +7,7 @@ #include -#include -#include #include -#include -#include #include #include "ametsuchi/tx_presence_cache.hpp" #include "ametsuchi/tx_presence_cache_utils.hpp" @@ -21,24 +17,21 @@ #include "interfaces/iroha_internal/transaction_batch.hpp" #include "interfaces/transaction.hpp" #include "logger/logger.hpp" +#include "main/subscription.hpp" -using namespace iroha; -using namespace iroha::ordering; -using TransactionBatchType = transport::OdOsNotification::TransactionBatchType; +using iroha::ordering::OnDemandOrderingServiceImpl; OnDemandOrderingServiceImpl::OnDemandOrderingServiceImpl( size_t transaction_limit, std::shared_ptr proposal_factory, std::shared_ptr tx_cache, - std::shared_ptr proposal_creation_strategy, logger::LoggerPtr log, size_t number_of_proposals) : transaction_limit_(transaction_limit), number_of_proposals_(number_of_proposals), proposal_factory_(std::move(proposal_factory)), tx_cache_(std::move(tx_cache)), - proposal_creation_strategy_(std::move(proposal_creation_strategy)), log_(std::move(log)) {} // -------------------------| OnDemandOrderingService |------------------------- @@ -46,49 +39,39 @@ OnDemandOrderingServiceImpl::OnDemandOrderingServiceImpl( void OnDemandOrderingServiceImpl::onCollaborationOutcome( consensus::Round round) { log_->info("onCollaborationOutcome => {}", round); - current_round_ = round; - uploadProposal(round); + { + std::lock_guard lock(proposals_mutex_); + current_round_ = round; + } tryErase(round); } -// ----------------------------| OdOsNotification |----------------------------- - void OnDemandOrderingServiceImpl::onBatches(CollectionType batches) { - auto unprocessed_batches = - boost::adaptors::filter(batches, [this](const auto &batch) { - log_->debug("check batch {} for already processed transactions", - batch->reducedHash().hex()); - return not this->batchAlreadyProcessed(*batch); - }); - std::for_each(unprocessed_batches.begin(), - 
unprocessed_batches.end(), - [this](auto &obj) { insertBatchToCache(obj); }); - log_->info("onBatches => collection size = {}", batches.size()); -} + for (auto &batch : batches) + if (not batchAlreadyProcessed(*batch)) + if (!insertBatchToCache(batch)) + break; -boost::optional< - std::shared_ptr> -OnDemandOrderingServiceImpl::onRequestProposal(consensus::Round round) { - log_->debug("Requesting a proposal for round {}", round); - boost::optional< - std::shared_ptr> - result = uploadProposal(round); - log_->debug("onRequestProposal, {}, {}returning a proposal.", - round, - result ? "" : "NOT "); - return result; + log_->info("onBatches => collection size = {}", batches.size()); } // ---------------------------------| Private |--------------------------------- -void OnDemandOrderingServiceImpl::insertBatchToCache( +bool OnDemandOrderingServiceImpl::insertBatchToCache( std::shared_ptr const &batch) { std::lock_guard lock(batches_cache_cs_); - batches_cache_.insert(batch); + if (used_batches_cache_.find(batch) == used_batches_cache_.end()) { + batches_cache_.insert(batch); + getSubscription()->notify(EventTypes::kOnNewBatchInCache, + std::shared_ptr(batch)); + } + return true; } void OnDemandOrderingServiceImpl::removeFromBatchesCache( const OnDemandOrderingService::HashesSetType &hashes) { std::lock_guard lock(batches_cache_cs_); + batches_cache_.merge(used_batches_cache_); + assert(used_batches_cache_.empty()); for (auto it = batches_cache_.begin(); it != batches_cache_.end();) { if (std::any_of(it->get()->transactions().begin(), it->get()->transactions().end(), @@ -108,8 +91,7 @@ bool OnDemandOrderingServiceImpl::isEmptyBatchesCache() const { } void OnDemandOrderingServiceImpl::forCachedBatches( - std::function< - void(const transport::OdOsNotification::BatchesSetType &)> const &f) { + std::function const &f) const { std::shared_lock lock(batches_cache_cs_); f(batches_cache_); } @@ -120,24 +102,26 @@ OnDemandOrderingServiceImpl::getTransactionsFromBatchesCache( 
std::vector> collection; collection.reserve(requested_tx_amount); - std::shared_lock lock(batches_cache_cs_); + std::lock_guard lock(batches_cache_cs_); auto it = batches_cache_.begin(); - for (; it != batches_cache_.end() - and collection.size() + boost::size((*it)->transactions()) - <= requested_tx_amount; - ++it) { + while (it != batches_cache_.end() + and collection.size() + boost::size((*it)->transactions()) + <= requested_tx_amount) { collection.insert(std::end(collection), std::begin((*it)->transactions()), std::end((*it)->transactions())); + used_batches_cache_.insert(*it); + batches_cache_.erase(it); + it = batches_cache_.begin(); } return collection; } -boost::optional< - std::shared_ptr> -OnDemandOrderingServiceImpl::uploadProposal(consensus::Round round) { - boost::optional< +std::optional> +OnDemandOrderingServiceImpl::onRequestProposal(consensus::Round round) { + log_->debug("Requesting a proposal for round {}", round); + std::optional< std::shared_ptr> result; do { @@ -154,18 +138,23 @@ OnDemandOrderingServiceImpl::uploadProposal(consensus::Round round) { : (round.block_round - current_round_.block_round)) <= 2ull; - if (is_current_round_or_next2) + if (is_current_round_or_next2) { result = packNextProposals(round); + getSubscription()->notify(EventTypes::kOnPackProposal, round); + } } while (false); + log_->debug("uploadProposal, {}, {}returning a proposal.", + round, + result ? 
"" : "NOT "); return result; } -boost::optional> +std::optional> OnDemandOrderingServiceImpl::tryCreateProposal( consensus::Round const &round, const TransactionsCollectionType &txs, shared_model::interface::types::TimestampType created_time) { - boost::optional> proposal; + std::optional> proposal; if (not txs.empty()) { proposal = proposal_factory_->unsafeCreateProposal( round.block_round, created_time, txs | boost::adaptors::indirected); @@ -175,7 +164,7 @@ OnDemandOrderingServiceImpl::tryCreateProposal( round, txs.size()); } else { - proposal = boost::none; + proposal = std::nullopt; log_->debug("No transactions to create a proposal for {}", round); } @@ -184,7 +173,7 @@ OnDemandOrderingServiceImpl::tryCreateProposal( return proposal; } -boost::optional> +std::optional> OnDemandOrderingServiceImpl::packNextProposals(const consensus::Round &round) { auto now = iroha::time::now(); std::vector> txs; @@ -244,3 +233,17 @@ bool OnDemandOrderingServiceImpl::batchAlreadyProcessed( return false; }); } + +bool OnDemandOrderingServiceImpl::hasProposal(consensus::Round round) const { + std::lock_guard lock(proposals_mutex_); + return proposal_map_.find(round) != proposal_map_.end(); +} + +void OnDemandOrderingServiceImpl::processReceivedProposal( + CollectionType batches) { + std::lock_guard lock(batches_cache_cs_); + for (auto &batch : batches) { + batches_cache_.erase(batch); + used_batches_cache_.insert(batch); + } +} diff --git a/irohad/ordering/impl/on_demand_ordering_service_impl.hpp b/irohad/ordering/impl/on_demand_ordering_service_impl.hpp index ae3bbe2f08c..d09a662e8d4 100644 --- a/irohad/ordering/impl/on_demand_ordering_service_impl.hpp +++ b/irohad/ordering/impl/on_demand_ordering_service_impl.hpp @@ -8,20 +8,16 @@ #include "ordering/on_demand_ordering_service.hpp" -#include -#include #include #include #include -#include #include "interfaces/iroha_internal/unsafe_proposal_factory.hpp" #include "logger/logger_fwd.hpp" #include "multi_sig_transactions/hash.hpp" // 
TODO 2019-03-15 andrei: IR-403 Separate BatchHashEquality and MstState #include "multi_sig_transactions/state/mst_state.hpp" #include "ordering/impl/on_demand_common.hpp" -#include "ordering/ordering_service_proposal_creation_strategy.hpp" namespace iroha { namespace ametsuchi { @@ -29,15 +25,10 @@ namespace iroha { } namespace ordering { namespace detail { - using BatchSetType = tbb::concurrent_unordered_set< - transport::OdOsNotification::TransactionBatchType, - model::PointerBatchHasher, - shared_model::interface::BatchHashEquality>; - using ProposalMapType = std::map>>; + std::optional>>; } // namespace detail class OnDemandOrderingServiceImpl : public OnDemandOrderingService { @@ -51,48 +42,42 @@ namespace iroha { * @param log to print progress * @param number_of_proposals - number of stored proposals, older will be * removed. Default value is 3 - * @param creation_strategy - provides a strategy for creating proposals */ OnDemandOrderingServiceImpl( size_t transaction_limit, std::shared_ptr proposal_factory, std::shared_ptr tx_cache, - std::shared_ptr proposal_creation_strategy, logger::LoggerPtr log, size_t number_of_proposals = 3); // --------------------- | OnDemandOrderingService |_--------------------- + void onBatches(CollectionType batches) override; + + std::optional> onRequestProposal( + consensus::Round round) override; + void onCollaborationOutcome(consensus::Round round) override; void onTxsCommitted(const HashesSetType &hashes) override { removeFromBatchesCache(hashes); } - // ----------------------- | OdOsNotification | -------------------------- - - void onBatches(CollectionType batches) override; - - boost::optional> onRequestProposal( - consensus::Round round) override; + void processReceivedProposal(CollectionType batches) override; private: /** * Packs new proposals and creates new rounds * Note: method is not thread-safe */ - boost::optional> + std::optional> packNextProposals(const consensus::Round &round); - boost::optional< - 
std::shared_ptr> - uploadProposal(consensus::Round round); - using TransactionsCollectionType = std::vector>; - boost::optional> + std::optional> tryCreateProposal( consensus::Round const &round, const TransactionsCollectionType &txs, @@ -111,22 +96,23 @@ namespace iroha { bool batchAlreadyProcessed( const shared_model::interface::TransactionBatch &batch); - void insertBatchToCache( + bool insertBatchToCache( std::shared_ptr const &batch); void removeFromBatchesCache( const OnDemandOrderingService::HashesSetType &hashes); - bool isEmptyBatchesCache() const; + bool isEmptyBatchesCache() const override; void forCachedBatches( - std::function const &f) override; + std::function const &f) const override; std::vector> getTransactionsFromBatchesCache(size_t requested_tx_amount); + bool hasProposal(consensus::Round round) const override; + /** * Max number of transaction in one proposal */ @@ -145,10 +131,10 @@ namespace iroha { /** * Proposal collection mutexes for public methods */ - std::mutex proposals_mutex_; + mutable std::mutex proposals_mutex_; mutable std::shared_timed_mutex batches_cache_cs_; - BatchesSetType batches_cache_; + BatchesSetType batches_cache_, used_batches_cache_; std::shared_ptr proposal_factory_; @@ -158,11 +144,6 @@ namespace iroha { */ std::shared_ptr tx_cache_; - /** - * Strategy for creating proposals - */ - std::shared_ptr proposal_creation_strategy_; - /** * Logger instance */ diff --git a/irohad/ordering/impl/on_demand_os_client_grpc.cpp b/irohad/ordering/impl/on_demand_os_client_grpc.cpp index 54529f333f5..5adc7bcb7b1 100644 --- a/irohad/ordering/impl/on_demand_os_client_grpc.cpp +++ b/irohad/ordering/impl/on_demand_os_client_grpc.cpp @@ -10,26 +10,25 @@ #include "interfaces/common_objects/peer.hpp" #include "interfaces/iroha_internal/transaction_batch.hpp" #include "logger/logger.hpp" +#include "main/subscription.hpp" #include "network/impl/client_factory.hpp" -using namespace iroha; -using namespace iroha::ordering; -using namespace 
iroha::ordering::transport; +using iroha::ordering::transport::OnDemandOsClientGrpc; +using iroha::ordering::transport::OnDemandOsClientGrpcFactory; OnDemandOsClientGrpc::OnDemandOsClientGrpc( std::shared_ptr stub, - std::shared_ptr> - async_call, std::shared_ptr proposal_factory, std::function time_provider, std::chrono::milliseconds proposal_request_timeout, - logger::LoggerPtr log) + logger::LoggerPtr log, + std::function callback) : log_(std::move(log)), stub_(std::move(stub)), - async_call_(std::move(async_call)), proposal_factory_(std::move(proposal_factory)), time_provider_(std::move(time_provider)), - proposal_request_timeout_(proposal_request_timeout) {} + proposal_request_timeout_(proposal_request_timeout), + callback_(std::move(callback)) {} void OnDemandOsClientGrpc::onBatches(CollectionType batches) { proto::BatchesRequest request; @@ -41,67 +40,113 @@ void OnDemandOsClientGrpc::onBatches(CollectionType batches) { } } - log_->debug("Propagating: '{}'", request.DebugString()); - - async_call_->Call([&](auto context, auto cq) { - return stub_->AsyncSendBatches(context, request, cq); - }); + getSubscription()->dispatcher()->add( + getSubscription()->dispatcher()->kExecuteInPool, + [time_provider(time_provider_), + request(std::move(request)), + stub(utils::make_weak(stub_)), + log(utils::make_weak(log_))] { + auto maybe_stub = stub.lock(); + auto maybe_log = log.lock(); + if (not(maybe_stub and maybe_log)) { + return; + } + grpc::ClientContext context; + context.set_wait_for_ready(true); + context.set_deadline(time_provider() + std::chrono::seconds(5)); + google::protobuf::Empty response; + maybe_log->info("Sending batches"); + auto status = maybe_stub->SendBatches(&context, request, &response); + if (not status.ok()) { + maybe_log->warn( + "RPC failed: {} {}", context.peer(), status.error_message()); + } else { + maybe_log->info("RPC succeeded: {}", context.peer()); + } + }); } -boost::optional> -OnDemandOsClientGrpc::onRequestProposal(consensus::Round 
round) { - grpc::ClientContext context; - context.set_deadline(time_provider_() + proposal_request_timeout_); +void OnDemandOsClientGrpc::onRequestProposal(consensus::Round round) { + // Cancel an unfinished request + if (auto maybe_context = context_.lock()) { + maybe_context->TryCancel(); + } + + auto context = std::make_shared(); + context_ = context; proto::ProposalRequest request; request.mutable_round()->set_block_round(round.block_round); request.mutable_round()->set_reject_round(round.reject_round); - proto::ProposalResponse response; - auto status = stub_->RequestProposal(&context, request, &response); - if (not status.ok()) { - log_->warn("RPC failed: {}", status.error_message()); - return boost::none; - } - if (not response.has_proposal()) { - return boost::none; - } - return proposal_factory_->build(response.proposal()) - .match( - [&](auto &&v) { - return boost::make_optional( - std::shared_ptr( - std::move(v).value)); - }, - [this](const auto &error) { - log_->info("{}", error.error.error); // error - return boost::optional< - std::shared_ptr>(); - }); + getSubscription()->dispatcher()->add( + getSubscription()->dispatcher()->kExecuteInPool, + [round, + time_provider(time_provider_), + proposal_request_timeout(proposal_request_timeout_), + context(std::move(context)), + request(std::move(request)), + stub(utils::make_weak(stub_)), + log(utils::make_weak(log_)), + proposal_factory(utils::make_weak(proposal_factory_)), + callback(callback_)] { + auto maybe_stub = stub.lock(); + auto maybe_log = log.lock(); + auto maybe_proposal_factory = proposal_factory.lock(); + if (not(maybe_stub and maybe_log and maybe_proposal_factory)) { + return; + } + context->set_wait_for_ready(true); + context->set_deadline(time_provider() + proposal_request_timeout); + proto::ProposalResponse response; + maybe_log->info("Requesting proposal"); + auto status = + maybe_stub->RequestProposal(context.get(), request, &response); + if (not status.ok()) { + maybe_log->warn( + "RPC 
failed: {} {}", context->peer(), status.error_message()); + callback({std::nullopt, round}); + return; + } else { + maybe_log->info("RPC succeeded: {}", context->peer()); + } + if (not response.has_proposal()) { + callback({std::nullopt, round}); + return; + } + auto maybe_proposal = + maybe_proposal_factory->build(response.proposal()); + if (expected::hasError(maybe_proposal)) { + maybe_log->info("{}", maybe_proposal.assumeError().error); + callback({std::nullopt, round}); + return; + } + callback({std::move(maybe_proposal).assumeValue(), round}); + }); } OnDemandOsClientGrpcFactory::OnDemandOsClientGrpcFactory( - std::shared_ptr> - async_call, std::shared_ptr proposal_factory, std::function time_provider, OnDemandOsClientGrpc::TimeoutType proposal_request_timeout, logger::LoggerPtr client_log, - std::unique_ptr client_factory) - : async_call_(std::move(async_call)), - proposal_factory_(std::move(proposal_factory)), + std::unique_ptr client_factory, + std::function callback) + : proposal_factory_(std::move(proposal_factory)), time_provider_(time_provider), proposal_request_timeout_(proposal_request_timeout), client_log_(std::move(client_log)), - client_factory_(std::move(client_factory)) {} + client_factory_(std::move(client_factory)), + callback_(callback) {} -expected::Result, std::string> +iroha::expected::Result< + std::unique_ptr, + std::string> OnDemandOsClientGrpcFactory::create(const shared_model::interface::Peer &to) { return client_factory_->createClient(to) | [&](auto &&client) -> std::unique_ptr { return std::make_unique(std::move(client), - async_call_, proposal_factory_, time_provider_, proposal_request_timeout_, - client_log_); + client_log_, + callback_); }; } diff --git a/irohad/ordering/impl/on_demand_os_client_grpc.hpp b/irohad/ordering/impl/on_demand_os_client_grpc.hpp index 4a2c39cc215..16cdcddfc25 100644 --- a/irohad/ordering/impl/on_demand_os_client_grpc.hpp +++ b/irohad/ordering/impl/on_demand_os_client_grpc.hpp @@ -11,8 +11,8 @@ #include 
"common/result.hpp" #include "interfaces/iroha_internal/abstract_transport_factory.hpp" #include "logger/logger_fwd.hpp" -#include "network/impl/async_grpc_client.hpp" #include "ordering.grpc.pb.h" +#include "ordering/impl/on_demand_common.hpp" namespace iroha { namespace network { @@ -40,26 +40,24 @@ namespace iroha { */ OnDemandOsClientGrpc( std::shared_ptr stub, - std::shared_ptr> - async_call, std::shared_ptr proposal_factory, std::function time_provider, std::chrono::milliseconds proposal_request_timeout, - logger::LoggerPtr log); + logger::LoggerPtr log, + std::function callback); void onBatches(CollectionType batches) override; - boost::optional> onRequestProposal( - consensus::Round round) override; + void onRequestProposal(consensus::Round round) override; private: logger::LoggerPtr log_; std::shared_ptr stub_; - std::shared_ptr> - async_call_; std::shared_ptr proposal_factory_; std::function time_provider_; std::chrono::milliseconds proposal_request_timeout_; + std::function callback_; + std::weak_ptr context_; }; class OnDemandOsClientGrpcFactory : public OdOsNotificationFactory { @@ -69,25 +67,23 @@ namespace iroha { using TransportFactoryType = OnDemandOsClientGrpc::TransportFactoryType; OnDemandOsClientGrpcFactory( - std::shared_ptr> - async_call, std::shared_ptr proposal_factory, std::function time_provider, OnDemandOsClientGrpc::TimeoutType proposal_request_timeout, logger::LoggerPtr client_log, - std::unique_ptr client_factory); + std::unique_ptr client_factory, + std::function callback); iroha::expected::Result, std::string> create(const shared_model::interface::Peer &to) override; private: - std::shared_ptr> - async_call_; std::shared_ptr proposal_factory_; std::function time_provider_; std::chrono::milliseconds proposal_request_timeout_; logger::LoggerPtr client_log_; std::unique_ptr client_factory_; + std::function callback_; }; } // namespace transport diff --git a/irohad/ordering/impl/on_demand_os_server_grpc.cpp 
b/irohad/ordering/impl/on_demand_os_server_grpc.cpp index 0581a11bdce..f22df355b1d 100644 --- a/irohad/ordering/impl/on_demand_os_server_grpc.cpp +++ b/irohad/ordering/impl/on_demand_os_server_grpc.cpp @@ -3,35 +3,35 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include "common/default_constructible_unary_fn.hpp" // non-copyable value workaround - #include "ordering/impl/on_demand_os_server_grpc.hpp" -#include -#include #include "backend/protobuf/deserialize_repeated_transactions.hpp" #include "backend/protobuf/proposal.hpp" -#include "common/bind.hpp" #include "interfaces/iroha_internal/parse_and_create_batches.hpp" #include "interfaces/iroha_internal/transaction_batch.hpp" #include "logger/logger.hpp" +#include "main/subscription.hpp" +#include "ordering/on_demand_ordering_service.hpp" +#include "subscription/scheduler_impl.hpp" using namespace iroha::ordering; using namespace iroha::ordering::transport; OnDemandOsServerGrpc::OnDemandOsServerGrpc( - std::shared_ptr ordering_service, + std::shared_ptr ordering_service, std::shared_ptr transaction_factory, std::shared_ptr batch_parser, std::shared_ptr transaction_batch_factory, - logger::LoggerPtr log) + logger::LoggerPtr log, + std::chrono::milliseconds delay) : ordering_service_(ordering_service), transaction_factory_(std::move(transaction_factory)), batch_parser_(std::move(batch_parser)), batch_factory_(std::move(transaction_batch_factory)), - log_(std::move(log)) {} + log_(std::move(log)), + delay_(delay) {} grpc::Status OnDemandOsServerGrpc::SendBatches( ::grpc::ServerContext *context, @@ -52,6 +52,10 @@ grpc::Status OnDemandOsServerGrpc::SendBatches( return ::grpc::Status::OK; } + log_->info("Received SendBatches with {} from {}", + *batches.assumeValue().front(), + context->peer()); + ordering_service_->onBatches(std::move(batches).assumeValue()); return ::grpc::Status::OK; @@ -61,12 +65,49 @@ grpc::Status OnDemandOsServerGrpc::RequestProposal( ::grpc::ServerContext *context, const proto::ProposalRequest 
*request, proto::ProposalResponse *response) { - ordering_service_->onRequestProposal( - {request->round().block_round(), request->round().reject_round()}) - | [&](auto &&proposal) { - *response->mutable_proposal() = - static_cast(proposal.get()) - ->getTransport(); - }; + consensus::Round round{request->round().block_round(), + request->round().reject_round()}; + log_->info("Received RequestProposal for {} from {}", round, context->peer()); + if (not ordering_service_->hasProposal(round) + and ordering_service_->isEmptyBatchesCache()) { + auto scheduler = std::make_shared(); + auto tid = getSubscription()->dispatcher()->bind(scheduler); + + auto batches_subscription = SubscriberCreator< + bool, + std::shared_ptr>:: + template create( + static_cast(*tid), + [scheduler(utils::make_weak(scheduler))](auto, auto) { + if (auto maybe_scheduler = scheduler.lock()) + maybe_scheduler->dispose(); + }); + auto proposals_subscription = + SubscriberCreator::template create< + EventTypes::kOnPackProposal>( + static_cast(*tid), + [round, scheduler(utils::make_weak(scheduler))](auto, + auto packed_round) { + if (auto maybe_scheduler = scheduler.lock(); + maybe_scheduler and round == packed_round) + maybe_scheduler->dispose(); + }); + scheduler->addDelayed(delay_, [scheduler(utils::make_weak(scheduler))] { + if (auto maybe_scheduler = scheduler.lock()) { + maybe_scheduler->dispose(); + } + }); + + scheduler->process(); + + getSubscription()->dispatcher()->unbind(*tid); + } + + if (auto maybe_proposal = ordering_service_->onRequestProposal(round)) { + *response->mutable_proposal() = + static_cast( + maybe_proposal->get()) + ->getTransport(); + } return ::grpc::Status::OK; } diff --git a/irohad/ordering/impl/on_demand_os_server_grpc.hpp b/irohad/ordering/impl/on_demand_os_server_grpc.hpp index d918d088ecf..7d348101805 100644 --- a/irohad/ordering/impl/on_demand_os_server_grpc.hpp +++ b/irohad/ordering/impl/on_demand_os_server_grpc.hpp @@ -16,6 +16,7 @@ namespace iroha { namespace 
ordering { + class OnDemandOrderingService; namespace transport { /** @@ -29,13 +30,14 @@ namespace iroha { iroha::protocol::Transaction>; OnDemandOsServerGrpc( - std::shared_ptr ordering_service, + std::shared_ptr ordering_service, std::shared_ptr transaction_factory, std::shared_ptr batch_parser, std::shared_ptr transaction_batch_factory, - logger::LoggerPtr log); + logger::LoggerPtr log, + std::chrono::milliseconds delay); grpc::Status SendBatches(::grpc::ServerContext *context, const proto::BatchesRequest *request, @@ -47,7 +49,7 @@ namespace iroha { proto::ProposalResponse *response) override; private: - std::shared_ptr ordering_service_; + std::shared_ptr ordering_service_; std::shared_ptr transaction_factory_; std::shared_ptr @@ -56,6 +58,7 @@ namespace iroha { batch_factory_; logger::LoggerPtr log_; + std::chrono::milliseconds delay_; }; } // namespace transport diff --git a/irohad/ordering/impl/ordering_gate_cache/on_demand_cache.cpp b/irohad/ordering/impl/ordering_gate_cache/on_demand_cache.cpp deleted file mode 100644 index 01903db0d5c..00000000000 --- a/irohad/ordering/impl/ordering_gate_cache/on_demand_cache.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -#include "ordering/impl/ordering_gate_cache/on_demand_cache.hpp" - -#include "interfaces/iroha_internal/transaction_batch.hpp" -#include "interfaces/transaction.hpp" - -using namespace iroha::ordering::cache; - -// TODO: IR-1864 13.11.18 kamilsa use nvi to separate business logic and locking -// logic - -void OnDemandCache::addToBack( - const OrderingGateCache::BatchesSetType &batches) { - std::unique_lock lock(mutex_); - circ_buffer.back().insert(batches.begin(), batches.end()); -} - -void OnDemandCache::remove(const OrderingGateCache::HashesSetType &hashes) { - std::unique_lock lock(mutex_); - for (auto &batches : circ_buffer) { - for (auto it = batches.begin(); it != batches.end();) { - if (std::any_of(it->get()->transactions().begin(), - it->get()->transactions().end(), - [&hashes](const auto &tx) { - return hashes.find(tx->hash()) != hashes.end(); - })) { - // returns iterator following the last removed element - // hence there is no increment in loop iteration_expression - it = batches.erase(it); - } else { - ++it; - } - } - } -} - -OrderingGateCache::BatchesSetType OnDemandCache::pop() { - std::unique_lock lock(mutex_); - BatchesSetType res; - std::swap(res, circ_buffer.front()); - // push empty set to remove front element - circ_buffer.push_back(BatchesSetType{}); - return res; -} - -const OrderingGateCache::BatchesSetType &OnDemandCache::head() const { - std::shared_lock lock(mutex_); - return circ_buffer.front(); -} - -const OrderingGateCache::BatchesSetType &OnDemandCache::tail() const { - std::shared_lock lock(mutex_); - return circ_buffer.back(); -} diff --git a/irohad/ordering/impl/ordering_gate_cache/on_demand_cache.hpp b/irohad/ordering/impl/ordering_gate_cache/on_demand_cache.hpp deleted file mode 100644 index bdc9e1ff2af..00000000000 --- a/irohad/ordering/impl/ordering_gate_cache/on_demand_cache.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_ON_DEMAND_CACHE_HPP -#define IROHA_ON_DEMAND_CACHE_HPP - -#include "ordering/impl/ordering_gate_cache/ordering_gate_cache.hpp" - -#include - -#include - -namespace iroha { - namespace ordering { - namespace cache { - - class OnDemandCache : public OrderingGateCache { - public: - void addToBack(const BatchesSetType &batches) override; - - BatchesSetType pop() override; - - void remove(const HashesSetType &hashes) override; - - virtual const BatchesSetType &head() const override; - - virtual const BatchesSetType &tail() const override; - - private: - mutable std::shared_timed_mutex mutex_; - using BatchesQueueType = boost::circular_buffer; - BatchesQueueType circ_buffer{3, BatchesSetType{}}; - }; - - } // namespace cache - } // namespace ordering -} // namespace iroha - -#endif // IROHA_ON_DEMAND_CACHE_HPP diff --git a/irohad/ordering/impl/ordering_gate_cache/ordering_gate_cache.cpp b/irohad/ordering/impl/ordering_gate_cache/ordering_gate_cache.cpp deleted file mode 100644 index f884eb8767c..00000000000 --- a/irohad/ordering/impl/ordering_gate_cache/ordering_gate_cache.cpp +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
- * SPDX-License-Identifier: Apache-2.0 - */ - -#include "ordering/impl/ordering_gate_cache/ordering_gate_cache.hpp" - -#include "interfaces/iroha_internal/transaction_batch.hpp" - -namespace iroha { - namespace ordering { - namespace cache { - - size_t OrderingGateCache::BatchPointerHasher::operator()( - const std::shared_ptr &a) - const { - return hasher_(a->reducedHash()); - } - - } // namespace cache - } // namespace ordering -} // namespace iroha diff --git a/irohad/ordering/impl/ordering_gate_cache/ordering_gate_cache.hpp b/irohad/ordering/impl/ordering_gate_cache/ordering_gate_cache.hpp deleted file mode 100644 index 88c957de09a..00000000000 --- a/irohad/ordering/impl/ordering_gate_cache/ordering_gate_cache.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_ON_DEMAND_ORDERING_CACHE_HPP -#define IROHA_ON_DEMAND_ORDERING_CACHE_HPP - -#include -#include - -#include "cryptography/hash.hpp" - -namespace shared_model { - namespace interface { - class TransactionBatch; - } -} // namespace shared_model - -namespace iroha { - namespace ordering { - namespace cache { - - /** - * Cache for transactions sent to ordering gate - */ - class OrderingGateCache { - private: - /** - * Hasher for the shared pointer on the batch. Uses batch's reduced hash - */ - struct BatchPointerHasher { - shared_model::crypto::Hash::Hasher hasher_; - - size_t operator()( - const std::shared_ptr - &a) const; - }; - - public: - /// type of the element in cache container. 
Set is used as it allows to - /// remove batch from BatchSet with O(1) complexity, which is the case - /// in remove method - using BatchesSetType = std::unordered_set< - std::shared_ptr, - BatchPointerHasher>; - - using HashesSetType = - std::unordered_set; - - /** - * Concatenates batches from the tail of the queue with provided batches - */ - virtual void addToBack(const BatchesSetType &batches) = 0; - - /** - * Pops the head batches and returns them - */ - virtual BatchesSetType pop() = 0; - - /** - * Removes batches by provided hashes from the head of the queue - */ - virtual void remove(const HashesSetType &hashes) = 0; - - /** - * Return the head batches - */ - virtual const BatchesSetType &head() const = 0; - - /** - * Return the tail batches - */ - virtual const BatchesSetType &tail() const = 0; - - virtual ~OrderingGateCache() = default; - }; - - } // namespace cache - - } // namespace ordering -} // namespace iroha - -#endif // IROHA_ON_DEMAND_ORDERING_CACHE_HPP diff --git a/irohad/ordering/impl/round_switch.hpp b/irohad/ordering/impl/round_switch.hpp new file mode 100644 index 00000000000..88c53c83352 --- /dev/null +++ b/irohad/ordering/impl/round_switch.hpp @@ -0,0 +1,29 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_ROUND_SWITCH_HPP +#define IROHA_ROUND_SWITCH_HPP + +#include + +#include "consensus/round.hpp" + +namespace iroha { + struct LedgerState; +} + +namespace iroha::ordering { + struct RoundSwitch { + consensus::Round next_round; + std::shared_ptr ledger_state; + + RoundSwitch(consensus::Round next_round, + std::shared_ptr ledger_state) + : next_round(std::move(next_round)), + ledger_state(std::move(ledger_state)) {} + }; +} // namespace iroha::ordering + +#endif // IROHA_ROUND_SWITCH_HPP diff --git a/irohad/ordering/impl/unique_creation_proposal_strategy.hpp b/irohad/ordering/impl/unique_creation_proposal_strategy.hpp deleted file mode 100644 index d84915cdc3f..00000000000 --- a/irohad/ordering/impl/unique_creation_proposal_strategy.hpp +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_UNIQUE_CREATION_PROPOSAL_STRATEGY_HPP -#define IROHA_UNIQUE_CREATION_PROPOSAL_STRATEGY_HPP - -#include "ordering/ordering_service_proposal_creation_strategy.hpp" - -#include -#include - -#include "common/ring_buffer.hpp" - -namespace iroha { - namespace ordering { - - /** - * Creating proposal once a round - */ - class UniqueCreationProposalStrategy : public ProposalCreationStrategy { - UniqueCreationProposalStrategy(UniqueCreationProposalStrategy const &) = - delete; - UniqueCreationProposalStrategy &operator=( - UniqueCreationProposalStrategy const &) = delete; - - UniqueCreationProposalStrategy(UniqueCreationProposalStrategy &&) = - delete; - UniqueCreationProposalStrategy &operator=( - UniqueCreationProposalStrategy &&) = delete; - - inline bool contains(RoundType round) { - bool is_exists = false; - requested_.foreach ([&is_exists, &round](auto /*h*/, auto const &data) { - if (round == data) { - is_exists = true; - return false; - } - return true; - }); - return is_exists; - } - - public: - 
UniqueCreationProposalStrategy() = default; - - void onCollaborationOutcome(RoundType /*round*/, - size_t /*peers_in_round*/) override {} - - bool shouldCreateRound(RoundType round) override { - std::lock_guard guard(mutex_); - return !contains(round); - } - - boost::optional onProposalRequest(RoundType round) override { - std::lock_guard guard(mutex_); - if (!contains(round)) { - requested_.push([](auto, auto &) {}, [](auto, auto &) {}, round); - } - return boost::none; - } - - private: - /// items count is something random must be more than 3 - using RoundCollectionType = containers::RingBuffer; - - std::mutex mutex_; - RoundCollectionType requested_; - }; - } // namespace ordering -} // namespace iroha - -#endif // IROHA_UNIQUE_CREATION_PROPOSAL_STRATEGY_HPP diff --git a/irohad/ordering/on_demand_ordering_service.hpp b/irohad/ordering/on_demand_ordering_service.hpp index fd50175c6af..6d0b1a10bcd 100644 --- a/irohad/ordering/on_demand_ordering_service.hpp +++ b/irohad/ordering/on_demand_ordering_service.hpp @@ -6,7 +6,18 @@ #ifndef IROHA_ON_DEMAND_ORDERING_SERVICE_HPP #define IROHA_ON_DEMAND_ORDERING_SERVICE_HPP -#include "ordering/on_demand_os_transport.hpp" +#include + +#include "consensus/round.hpp" +#include "cryptography/hash.hpp" +#include "interfaces/iroha_internal/transaction_batch.hpp" + +namespace shared_model { + namespace interface { + class TransactionBatch; + class Proposal; + } // namespace interface +} // namespace shared_model namespace iroha { namespace ordering { @@ -14,8 +25,49 @@ namespace iroha { /** * Ordering Service aka OS which can share proposals by request */ - class OnDemandOrderingService : public transport::OdOsNotification { + class OnDemandOrderingService { public: + virtual ~OnDemandOrderingService() = default; + + /** + * Type of stored proposals + */ + using ProposalType = shared_model::interface::Proposal; + + struct BatchPointerHasher { + shared_model::crypto::Hash::Hasher hasher_; + size_t operator()( + const std::shared_ptr 
&a) + const { + return hasher_(a->reducedHash()); + } + }; + + using BatchesSetType = std::unordered_set< + std::shared_ptr, + BatchPointerHasher, + shared_model::interface::BatchHashEquality>; + + /** + * Type of stored transaction batches + */ + using TransactionBatchType = + std::shared_ptr; + + /** + * Type of inserted collections + */ + using CollectionType = std::vector; + + /** + * Callback on receiving transactions + * @param batches - vector of passed transaction batches + */ + virtual void onBatches(CollectionType batches) = 0; + + virtual std::optional> + onRequestProposal(consensus::Round round) = 0; + using HashesSetType = std::unordered_set; @@ -37,8 +89,13 @@ namespace iroha { * @param f - callback function */ virtual void forCachedBatches( - std::function const &f) = 0; + std::function const &f) const = 0; + + virtual bool isEmptyBatchesCache() const = 0; + + virtual bool hasProposal(consensus::Round round) const = 0; + + virtual void processReceivedProposal(CollectionType batches) = 0; }; } // namespace ordering diff --git a/irohad/ordering/on_demand_os_transport.hpp b/irohad/ordering/on_demand_os_transport.hpp index 1f9ff4a58a1..b122a3d1004 100644 --- a/irohad/ordering/on_demand_os_transport.hpp +++ b/irohad/ordering/on_demand_os_transport.hpp @@ -7,14 +7,11 @@ #define IROHA_ON_DEMAND_OS_TRANSPORT_HPP #include -#include #include #include -#include #include "common/result_fwd.hpp" #include "consensus/round.hpp" -#include "cryptography/hash.hpp" #include "interfaces/iroha_internal/transaction_batch.hpp" namespace shared_model { @@ -34,25 +31,6 @@ namespace iroha { */ class OdOsNotification { public: - /** - * Type of stored proposals - */ - using ProposalType = shared_model::interface::Proposal; - - struct BatchPointerHasher { - shared_model::crypto::Hash::Hasher hasher_; - size_t operator()( - const std::shared_ptr - &a) const { - return hasher_(a->reducedHash()); - } - }; - - using BatchesSetType = std::unordered_set< - std::shared_ptr, - 
BatchPointerHasher, - shared_model::interface::BatchHashEquality>; - /** * Type of stored transaction batches */ @@ -74,10 +52,8 @@ namespace iroha { * Callback on request about proposal * @param round - number of collaboration round. * Calculated as block_height + 1 - * @return proposal for requested round */ - virtual boost::optional> - onRequestProposal(consensus::Round round) = 0; + virtual void onRequestProposal(consensus::Round round) = 0; virtual ~OdOsNotification() = default; }; diff --git a/irohad/ordering/ordering_service_proposal_creation_strategy.hpp b/irohad/ordering/ordering_service_proposal_creation_strategy.hpp deleted file mode 100644 index c23832620d8..00000000000 --- a/irohad/ordering/ordering_service_proposal_creation_strategy.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright Soramitsu Co., Ltd. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0 - */ - -#ifndef IROHA_ORDERING_SERVICE_PROPOSAL_CREATION_STRATEGY_HPP -#define IROHA_ORDERING_SERVICE_PROPOSAL_CREATION_STRATEGY_HPP - -#include -#include "consensus/round.hpp" - -namespace iroha { - namespace ordering { - - /** - * Class provides a strategy for creation proposals regarding to new rounds - * and requests from other peers - */ - class ProposalCreationStrategy { - public: - /// shortcut for round type - using RoundType = consensus::Round; - - /** - * Indicates the start of new round. 
- * @param round - proposal round which has started - * @param peers_in_round - peers which participate in new round - */ - virtual void onCollaborationOutcome(RoundType round, - size_t peers_in_round) = 0; - - /** - * @param round - new consensus round - * @return true, if proposal should be created in the new round - */ - virtual bool shouldCreateRound(RoundType round) = 0; - - /** - * Notify the strategy about proposal request - * @param requested_round - in which round proposal is requested - * @return round where proposal is required to be created immediately - */ - virtual boost::optional onProposalRequest( - RoundType requested_round) = 0; - - virtual ~ProposalCreationStrategy() = default; - }; - } // namespace ordering -} // namespace iroha - -#endif // IROHA_ORDERING_SERVICE_PROPOSAL_CREATION_STRATEGY_HPP diff --git a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp index 9ac17f3460d..2fb0af13462 100644 --- a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp +++ b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp @@ -9,301 +9,200 @@ #include "interfaces/transaction.hpp" #include "multi_sig_transactions/state/mst_state.hpp" -namespace iroha { - - PendingTransactionStorageImpl::PendingTransactionStorageImpl( - PendingTransactionStorageImpl::private_tag) {} - - std::shared_ptr - PendingTransactionStorageImpl::create( - StateObservable updated_batches, - BatchObservable prepared_batch, - BatchObservable expired_batch, - PreparedTransactionsObservable prepared_txs, - FinalizedTransactionsObservable finalized_txs) { - auto storage = std::make_shared( - PendingTransactionStorageImpl::private_tag{}); - std::weak_ptr storage_(storage); - - auto subscription = rxcpp::composite_subscription(); - updated_batches.subscribe( - subscription, [storage_, subscription](SharedState const &batches) { - if (auto storage = storage_.lock()) { - 
storage->updatedBatchesHandler(batches); - } else { - subscription.unsubscribe(); - } - }); - subscription = rxcpp::composite_subscription(); - prepared_batch.subscribe( - subscription, - [storage_, subscription](SharedBatch const &preparedBatch) { - if (auto storage = storage_.lock()) { - storage->removeBatch(preparedBatch); - } else { - subscription.unsubscribe(); - } - }); - subscription = rxcpp::composite_subscription(); - expired_batch.subscribe( - subscription, - [storage_, subscription](SharedBatch const &expiredBatch) { - if (auto storage = storage_.lock()) { - storage->removeBatch(expiredBatch); - } else { - subscription.unsubscribe(); - } - }); - subscription = rxcpp::composite_subscription(); - prepared_txs.subscribe( - subscription, - [storage_, subscription]( - PreparedTransactionDescriptor const &prepared_transaction) { - if (auto storage = storage_.lock()) { - storage->removeBatch(prepared_transaction); - } else { - subscription.unsubscribe(); - } - }); - subscription = rxcpp::composite_subscription(); - finalized_txs.subscribe( - subscription, - [storage_, - subscription](shared_model::interface::types::HashType const &hash) { - if (auto storage = storage_.lock()) { - storage->removeTransaction(hash); - } else { - subscription.unsubscribe(); - } - }); - - return storage; - } - - PendingTransactionStorageImpl::SharedTxsCollectionType - PendingTransactionStorageImpl::getPendingTransactions( - const AccountIdType &account_id) const { - std::shared_lock lock(mutex_); - auto account_batches_iterator = storage_.find(account_id); - if (storage_.end() != account_batches_iterator) { - SharedTxsCollectionType result; - for (const auto &batch : account_batches_iterator->second.batches) { - auto &txs = batch->transactions(); - result.insert(result.end(), txs.begin(), txs.end()); - } - return result; +using iroha::PendingTransactionStorageImpl; + +PendingTransactionStorageImpl::SharedTxsCollectionType +PendingTransactionStorageImpl::getPendingTransactions( + 
const AccountIdType &account_id) const { + std::shared_lock lock(mutex_); + auto account_batches_iterator = storage_.find(account_id); + if (storage_.end() != account_batches_iterator) { + SharedTxsCollectionType result; + for (const auto &batch : account_batches_iterator->second.batches) { + auto &txs = batch->transactions(); + result.insert(result.end(), txs.begin(), txs.end()); } - return {}; + return result; } - - expected::Result - PendingTransactionStorageImpl::getPendingTransactions( - const shared_model::interface::types::AccountIdType &account_id, - const shared_model::interface::types::TransactionsNumberType page_size, - const std::optional - &first_tx_hash) const { - BOOST_ASSERT_MSG(page_size > 0, "Page size has to be positive"); - std::shared_lock lock(mutex_); - auto account_batches_iterator = storage_.find(account_id); - if (storage_.end() == account_batches_iterator) { - if (first_tx_hash) { - return iroha::expected::makeError( - PendingTransactionStorage::ErrorCode::kNotFound); - } else { - return iroha::expected::makeValue( - PendingTransactionStorage::Response{}); - } - } - auto &account_batches = account_batches_iterator->second; - auto batch_iterator = account_batches.batches.begin(); + return {}; +} + +iroha::expected::Result +PendingTransactionStorageImpl::getPendingTransactions( + const shared_model::interface::types::AccountIdType &account_id, + const shared_model::interface::types::TransactionsNumberType page_size, + const std::optional + &first_tx_hash) const { + BOOST_ASSERT_MSG(page_size > 0, "Page size has to be positive"); + std::shared_lock lock(mutex_); + auto account_batches_iterator = storage_.find(account_id); + if (storage_.end() == account_batches_iterator) { if (first_tx_hash) { - auto index_iterator = account_batches.index.find(*first_tx_hash); - if (account_batches.index.end() == index_iterator) { - return iroha::expected::makeError( - PendingTransactionStorage::ErrorCode::kNotFound); - } - batch_iterator = 
index_iterator->second; + return iroha::expected::makeError( + PendingTransactionStorage::ErrorCode::kNotFound); + } else { + return iroha::expected::makeValue(PendingTransactionStorage::Response{}); } - BOOST_ASSERT_MSG(account_batches.batches.end() != batch_iterator, - "Empty account batches entry was not removed"); - - PendingTransactionStorage::Response response; - response.all_transactions_size = account_batches.all_transactions_quantity; - auto remaining_space = page_size; - while (account_batches.batches.end() != batch_iterator - and remaining_space - >= batch_iterator->get()->transactions().size()) { - auto &txs = batch_iterator->get()->transactions(); - response.transactions.insert( - response.transactions.end(), txs.begin(), txs.end()); - remaining_space -= txs.size(); - ++batch_iterator; - } - if (account_batches.batches.end() != batch_iterator) { - shared_model::interface::PendingTransactionsPageResponse::BatchInfo - next_batch_info; - auto &txs = batch_iterator->get()->transactions(); - next_batch_info.first_tx_hash = txs.front()->hash(); - next_batch_info.batch_size = txs.size(); - response.next_batch_info = std::move(next_batch_info); - } - return iroha::expected::makeValue(std::move(response)); } - - std::set - PendingTransactionStorageImpl::batchCreators(const TransactionBatch &batch) { - std::set creators; - for (const auto &transaction : batch.transactions()) { - creators.insert(transaction->creatorAccountId()); + auto &account_batches = account_batches_iterator->second; + auto batch_iterator = account_batches.batches.begin(); + if (first_tx_hash) { + auto index_iterator = account_batches.index.find(*first_tx_hash); + if (account_batches.index.end() == index_iterator) { + return iroha::expected::makeError( + PendingTransactionStorage::ErrorCode::kNotFound); } - return creators; + batch_iterator = index_iterator->second; } - - void PendingTransactionStorageImpl::updatedBatchesHandler( - const SharedState &updated_batches) { - // need to test 
performance somehow - where to put the lock - std::unique_lock lock(mutex_); - updated_batches->iterateBatches([this](const auto &batch) { - if (isReplay(*batch)) { - return; - } - - auto first_tx_hash = batch->transactions().front()->hash(); - auto batch_creators = batchCreators(*batch); - auto batch_size = batch->transactions().size(); - for (const auto &creator : batch_creators) { - auto account_batches_iterator = storage_.find(creator); - if (storage_.end() == account_batches_iterator) { - auto insertion_result = storage_.emplace( - creator, PendingTransactionStorageImpl::AccountBatches{}); - BOOST_ASSERT(insertion_result.second); - account_batches_iterator = insertion_result.first; - } - - auto &account_batches = account_batches_iterator->second; - auto index_iterator = account_batches.index.find(first_tx_hash); - if (index_iterator == account_batches.index.end()) { - // inserting the batch - account_batches.all_transactions_quantity += batch_size; - account_batches.batches.push_back(batch); - auto inserted_batch_iterator = - std::prev(account_batches.batches.end()); - account_batches.index.emplace(first_tx_hash, inserted_batch_iterator); - for (auto &tx : batch->transactions()) { - account_batches.txs_to_batches.insert({tx->hash(), batch}); - } - } else { - // updating batch - auto &account_batch = index_iterator->second; - *account_batch = batch; - } - } - }); + BOOST_ASSERT_MSG(account_batches.batches.end() != batch_iterator, + "Empty account batches entry was not removed"); + + PendingTransactionStorage::Response response; + response.all_transactions_size = account_batches.all_transactions_quantity; + auto remaining_space = page_size; + while (account_batches.batches.end() != batch_iterator + and remaining_space >= batch_iterator->get()->transactions().size()) { + auto &txs = batch_iterator->get()->transactions(); + response.transactions.insert( + response.transactions.end(), txs.begin(), txs.end()); + remaining_space -= txs.size(); + ++batch_iterator; } - 
- bool PendingTransactionStorageImpl::isReplay( - shared_model::interface::TransactionBatch const &batch) { - auto cache_ptr = presence_cache_.lock(); - if (!cache_ptr) { - return false; - } - - auto cache_presence = cache_ptr->check(batch); - if (!cache_presence) { - return false; - } - - return std::any_of(cache_presence->begin(), - cache_presence->end(), - &ametsuchi::isAlreadyProcessed); + if (account_batches.batches.end() != batch_iterator) { + shared_model::interface::PendingTransactionsPageResponse::BatchInfo + next_batch_info; + auto &txs = batch_iterator->get()->transactions(); + next_batch_info.first_tx_hash = txs.front()->hash(); + next_batch_info.batch_size = txs.size(); + response.next_batch_info = std::move(next_batch_info); } - - void PendingTransactionStorageImpl::insertPresenceCache( - std::shared_ptr &cache) { - assert(!!cache); - presence_cache_ = cache; + return iroha::expected::makeValue(std::move(response)); +} + +std::set +PendingTransactionStorageImpl::batchCreators(const TransactionBatch &batch) { + std::set creators; + for (const auto &transaction : batch.transactions()) { + creators.insert(transaction->creatorAccountId()); } + return creators; +} + +void PendingTransactionStorageImpl::updatedBatchesHandler( + const SharedState &updated_batches) { + // need to test performance somehow - where to put the lock + std::unique_lock lock(mutex_); + updated_batches->iterateBatches([this](const auto &batch) { + if (isReplay(*batch)) { + return; + } - inline void PendingTransactionStorageImpl::removeFromStorage( - const HashType &first_tx_hash, - const std::set &batch_creators, - uint64_t batch_size) { - // outer scope has to acquire unique lock over mutex_ + auto first_tx_hash = batch->transactions().front()->hash(); + auto batch_creators = batchCreators(*batch); + auto batch_size = batch->transactions().size(); for (const auto &creator : batch_creators) { auto account_batches_iterator = storage_.find(creator); - if (account_batches_iterator != 
storage_.end()) { - auto &account_batches = account_batches_iterator->second; - auto index_iterator = account_batches.index.find(first_tx_hash); - if (index_iterator != account_batches.index.end()) { - auto &batch_iterator = index_iterator->second; - BOOST_ASSERT(batch_iterator != account_batches.batches.end()); - account_batches.txs_to_batches.right.erase(*batch_iterator); - account_batches.batches.erase(batch_iterator); - account_batches.index.erase(index_iterator); - account_batches.all_transactions_quantity -= batch_size; - } - if (0 == account_batches.all_transactions_quantity) { - storage_.erase(account_batches_iterator); + if (storage_.end() == account_batches_iterator) { + auto insertion_result = storage_.emplace( + creator, PendingTransactionStorageImpl::AccountBatches{}); + BOOST_ASSERT(insertion_result.second); + account_batches_iterator = insertion_result.first; + } + + auto &account_batches = account_batches_iterator->second; + auto index_iterator = account_batches.index.find(first_tx_hash); + if (index_iterator == account_batches.index.end()) { + // inserting the batch + account_batches.all_transactions_quantity += batch_size; + account_batches.batches.push_back(batch); + auto inserted_batch_iterator = std::prev(account_batches.batches.end()); + account_batches.index.emplace(first_tx_hash, inserted_batch_iterator); + for (auto &tx : batch->transactions()) { + account_batches.txs_to_batches.insert({tx->hash(), batch}); } + } else { + // updating batch + auto &account_batch = index_iterator->second; + *account_batch = batch; } } + }); +} + +bool PendingTransactionStorageImpl::isReplay( + shared_model::interface::TransactionBatch const &batch) { + auto cache_ptr = presence_cache_.lock(); + if (!cache_ptr) { + return false; } - void PendingTransactionStorageImpl::removeBatch(const SharedBatch &batch) { - auto creators = batchCreators(*batch); - auto first_tx_hash = batch->transactions().front()->hash(); - auto batch_size = batch->transactions().size(); - 
std::unique_lock lock(mutex_); - removeFromStorage(first_tx_hash, creators, batch_size); + auto cache_presence = cache_ptr->check(batch); + if (!cache_presence) { + return false; } - void PendingTransactionStorageImpl::removeBatch( - const PreparedTransactionDescriptor &prepared_transaction) { - boost::optional> creators = boost::none; - boost::optional batch_size = boost::none; - auto &creator_id = prepared_transaction.first; - auto &first_transaction_hash = prepared_transaction.second; - { - std::shared_lock lock(mutex_); - auto account_batches_iterator = storage_.find(creator_id); - if (account_batches_iterator != storage_.end()) { - auto &account_batches = account_batches_iterator->second; - auto index_iterator = - account_batches.index.find(first_transaction_hash); - if (index_iterator != account_batches.index.end()) { - auto &batch_iterator = index_iterator->second; - BOOST_ASSERT(batch_iterator != account_batches.batches.end()); - creators = batchCreators(**batch_iterator); - batch_size = boost::size((*batch_iterator)->transactions()); - } + return std::any_of(cache_presence->begin(), + cache_presence->end(), + &ametsuchi::isAlreadyProcessed); +} + +void PendingTransactionStorageImpl::insertPresenceCache( + std::shared_ptr &cache) { + assert(!!cache); + presence_cache_ = cache; +} + +inline void PendingTransactionStorageImpl::removeFromStorage( + const HashType &first_tx_hash, + const std::set &batch_creators, + uint64_t batch_size) { + // outer scope has to acquire unique lock over mutex_ + for (const auto &creator : batch_creators) { + auto account_batches_iterator = storage_.find(creator); + if (account_batches_iterator != storage_.end()) { + auto &account_batches = account_batches_iterator->second; + auto index_iterator = account_batches.index.find(first_tx_hash); + if (index_iterator != account_batches.index.end()) { + auto &batch_iterator = index_iterator->second; + BOOST_ASSERT(batch_iterator != account_batches.batches.end()); + 
account_batches.txs_to_batches.right.erase(*batch_iterator); + account_batches.batches.erase(batch_iterator); + account_batches.index.erase(index_iterator); + account_batches.all_transactions_quantity -= batch_size; + } + if (0 == account_batches.all_transactions_quantity) { + storage_.erase(account_batches_iterator); } - } - if (creators and batch_size) { - std::unique_lock lock(mutex_); - removeFromStorage(first_transaction_hash, *creators, *batch_size); } } - - void PendingTransactionStorageImpl::removeTransaction(HashType const &hash) { - std::shared_lock read_lock(mutex_); - for (auto &p : storage_) { - auto &txs_index = p.second.txs_to_batches; - auto it = txs_index.left.find(hash); - if (txs_index.left.end() != it) { - auto batch = it->second; - assert(!!batch); - - auto const &transactions = batch->transactions(); - auto const &first_transaction_hash = transactions.front()->hash(); - auto const &creators = batchCreators(*batch); - auto batch_size = transactions.size(); - read_lock.unlock(); - std::unique_lock write_lock(mutex_); - removeFromStorage(first_transaction_hash, creators, batch_size); - return; - } +} + +void PendingTransactionStorageImpl::removeBatch(const SharedBatch &batch) { + auto creators = batchCreators(*batch); + auto first_tx_hash = batch->transactions().front()->hash(); + auto batch_size = batch->transactions().size(); + std::unique_lock lock(mutex_); + removeFromStorage(first_tx_hash, creators, batch_size); +} + +void PendingTransactionStorageImpl::removeTransaction(HashType const &hash) { + std::shared_lock read_lock(mutex_); + for (auto &p : storage_) { + auto &txs_index = p.second.txs_to_batches; + auto it = txs_index.left.find(hash); + if (txs_index.left.end() != it) { + auto batch = it->second; + assert(!!batch); + + auto const &transactions = batch->transactions(); + auto const &first_transaction_hash = transactions.front()->hash(); + auto const &creators = batchCreators(*batch); + auto batch_size = transactions.size(); + 
read_lock.unlock(); + std::unique_lock write_lock(mutex_); + removeFromStorage(first_transaction_hash, creators, batch_size); + return; } } - -} // namespace iroha +} diff --git a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.hpp b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.hpp index 4f3d10381c3..33d77457394 100644 --- a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.hpp +++ b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.hpp @@ -17,19 +17,12 @@ #include #include #include -#include #include "cryptography/hash.hpp" #include "interfaces/iroha_internal/transaction_batch.hpp" #include "multi_sig_transactions/hash.hpp" namespace iroha { - - class MstState; - class PendingTransactionStorageImpl : public PendingTransactionStorage { - private: - struct private_tag {}; - public: using AccountIdType = shared_model::interface::types::AccountIdType; using HashType = shared_model::interface::types::HashType; @@ -38,26 +31,6 @@ namespace iroha { using TransactionBatch = shared_model::interface::TransactionBatch; using SharedState = std::shared_ptr; using SharedBatch = std::shared_ptr; - using StateObservable = rxcpp::observable; - using BatchObservable = rxcpp::observable; - using PreparedTransactionDescriptor = std::pair; - using PreparedTransactionsObservable = - rxcpp::observable; - using FinalizedTransactionsObservable = rxcpp::observable; - - PendingTransactionStorageImpl(PendingTransactionStorageImpl::private_tag); - - PendingTransactionStorageImpl(PendingTransactionStorageImpl const &) = - delete; - PendingTransactionStorageImpl &operator=( - PendingTransactionStorageImpl const &) = delete; - - static std::shared_ptr create( - StateObservable updated_batches, - BatchObservable prepared_batch, - BatchObservable expired_batch, - PreparedTransactionsObservable prepared_txs, - FinalizedTransactionsObservable finalized_txs); SharedTxsCollectionType getPendingTransactions( const AccountIdType &account_id) const override; @@ -71,19 
+44,17 @@ namespace iroha { void insertPresenceCache( std::shared_ptr &cache) override; - private: - void updatedBatchesHandler(const SharedState &updated_batches); + void removeTransaction(HashType const &hash) override; - void removeBatch(const SharedBatch &batch); + void updatedBatchesHandler(const SharedState &updated_batches) override; - void removeBatch(const PreparedTransactionDescriptor &prepared_transaction); + void removeBatch(const SharedBatch &batch) override; + private: void removeFromStorage(const HashType &first_tx_hash, const std::set &batch_creators, uint64_t batch_size); - void removeTransaction(HashType const &hash); - static std::set batchCreators(const TransactionBatch &batch); bool isReplay(shared_model::interface::TransactionBatch const &batch); diff --git a/irohad/pending_txs_storage/pending_txs_storage.hpp b/irohad/pending_txs_storage/pending_txs_storage.hpp index 8b5a8b61fbe..285518e7b7f 100644 --- a/irohad/pending_txs_storage/pending_txs_storage.hpp +++ b/irohad/pending_txs_storage/pending_txs_storage.hpp @@ -15,6 +15,7 @@ #include "interfaces/query_responses/pending_transactions_page_response.hpp" namespace iroha { + class MstState; /** * Interface of storage for not fully signed transactions. 
@@ -81,6 +82,16 @@ namespace iroha { const std::optional &first_tx_hash) const = 0; + virtual void removeTransaction( + shared_model::interface::types::HashType const &hash) = 0; + + virtual void updatedBatchesHandler( + std::shared_ptr const &updated_batches) = 0; + + virtual void removeBatch( + std::shared_ptr const + &batch) = 0; + virtual ~PendingTransactionStorage() = default; }; diff --git a/irohad/simulator/CMakeLists.txt b/irohad/simulator/CMakeLists.txt index 8d067e8ddef..ed1d451e2a5 100644 --- a/irohad/simulator/CMakeLists.txt +++ b/irohad/simulator/CMakeLists.txt @@ -8,7 +8,6 @@ add_library(simulator target_link_libraries(simulator consensus_round shared_model_interfaces - rxcpp logger common ordering_gate_common diff --git a/irohad/simulator/block_creator.hpp b/irohad/simulator/block_creator.hpp index 043c920426c..7c3006ee451 100644 --- a/irohad/simulator/block_creator.hpp +++ b/irohad/simulator/block_creator.hpp @@ -6,15 +6,11 @@ #ifndef IROHA_BLOCK_CREATOR_HPP #define IROHA_BLOCK_CREATOR_HPP -#include #include "simulator/block_creator_common.hpp" namespace iroha { - namespace validation { - struct VerifiedProposalAndErrors; - } - namespace simulator { + struct VerifiedProposalCreatorEvent; /** * Interface for creating blocks from proposal @@ -24,16 +20,8 @@ namespace iroha { /** * Creates a block from given proposal and top block info */ - virtual boost::optional> - processVerifiedProposal( - const std::shared_ptr - &verified_proposal_and_errors, - const TopBlockInfo &top_block_info) = 0; - - /** - * Emit blocks made from proposals - */ - virtual rxcpp::observable onBlock() = 0; + virtual BlockCreatorEvent processVerifiedProposal( + VerifiedProposalCreatorEvent const &event) = 0; virtual ~BlockCreator() = default; }; diff --git a/irohad/simulator/impl/simulator.cpp b/irohad/simulator/impl/simulator.cpp index 0fca1451d53..15e7afeec3a 100644 --- a/irohad/simulator/impl/simulator.cpp +++ b/irohad/simulator/impl/simulator.cpp @@ -5,7 +5,6 @@ #include 
"simulator/impl/simulator.hpp" -#include #include "ametsuchi/command_executor.hpp" #include "common/bind.hpp" #include "interfaces/iroha_internal/block.hpp" @@ -17,7 +16,6 @@ namespace iroha { Simulator::Simulator( std::unique_ptr command_executor, - std::shared_ptr ordering_gate, std::shared_ptr statefulValidator, std::shared_ptr factory, std::shared_ptr crypto_signer, @@ -25,104 +23,67 @@ namespace iroha { block_factory, logger::LoggerPtr log) : command_executor_(std::move(command_executor)), - notifier_(notifier_lifetime_), - block_notifier_(block_notifier_lifetime_), validator_(std::move(statefulValidator)), ametsuchi_factory_(std::move(factory)), crypto_signer_(std::move(crypto_signer)), block_factory_(std::move(block_factory)), - log_(std::move(log)) { - ordering_gate->onProposal().subscribe( - proposal_subscription_, [this](const network::OrderingEvent &event) { - if (event.proposal) { - auto validated_proposal_and_errors = - this->processProposal(*getProposalUnsafe(event)); + log_(std::move(log)) {} - notifier_.get_subscriber().on_next( - VerifiedProposalCreatorEvent{validated_proposal_and_errors, - event.round, - event.ledger_state}); - } else { - notifier_.get_subscriber().on_next(VerifiedProposalCreatorEvent{ - boost::none, event.round, event.ledger_state}); - } - }); + VerifiedProposalCreatorEvent Simulator::processProposal( + network::OrderingEvent const &event) { + if (event.proposal) { + auto const &proposal = *getProposalUnsafe(event); + log_->info("process proposal: {}", proposal); - notifier_.get_observable().subscribe( - verified_proposal_subscription_, - [this](const VerifiedProposalCreatorEvent &event) { - if (event.verified_proposal_result) { - auto proposal_and_errors = getVerifiedProposalUnsafe(event); - auto block = this->processVerifiedProposal( - proposal_and_errors, event.ledger_state->top_block_info); - if (block) { - block_notifier_.get_subscriber().on_next(BlockCreatorEvent{ - RoundData{proposal_and_errors->verified_proposal, *block}, 
- event.round, - event.ledger_state}); - } - } else { - block_notifier_.get_subscriber().on_next(BlockCreatorEvent{ - boost::none, event.round, event.ledger_state}); - } - }); - } - - Simulator::~Simulator() { - notifier_lifetime_.unsubscribe(); - block_notifier_lifetime_.unsubscribe(); - proposal_subscription_.unsubscribe(); - verified_proposal_subscription_.unsubscribe(); - } - - rxcpp::observable - Simulator::onVerifiedProposal() { - return notifier_.get_observable(); - } + auto storage = + ametsuchi_factory_->createTemporaryWsv(command_executor_); - std::shared_ptr - Simulator::processProposal( - const shared_model::interface::Proposal &proposal) { - log_->info("process proposal: {}", proposal); + std::shared_ptr + validated_proposal_and_errors = + validator_->validate(proposal, *storage); + ametsuchi_factory_->prepareBlock(std::move(storage)); - auto storage = ametsuchi_factory_->createTemporaryWsv(command_executor_); - - std::shared_ptr - validated_proposal_and_errors = - validator_->validate(proposal, *storage); - ametsuchi_factory_->prepareBlock(std::move(storage)); - - return validated_proposal_and_errors; - } - - boost::optional> - Simulator::processVerifiedProposal( - const std::shared_ptr - &verified_proposal_and_errors, - const TopBlockInfo &top_block_info) { - const auto &proposal = verified_proposal_and_errors->verified_proposal; - if (proposal) - log_->info("process verified proposal: {}", *proposal); - else - log_->info("process verified proposal: no proposal"); - std::vector rejected_hashes; - for (const auto &rejected_tx : - verified_proposal_and_errors->rejected_transactions) { - rejected_hashes.push_back(rejected_tx.tx_hash); + return VerifiedProposalCreatorEvent{ + validated_proposal_and_errors, event.round, event.ledger_state}; + } else { + return VerifiedProposalCreatorEvent{ + boost::none, event.round, event.ledger_state}; } - std::shared_ptr block = - block_factory_->unsafeCreateBlock(top_block_info.height + 1, - top_block_info.top_hash, - 
proposal->createdTime(), - proposal->transactions(), - rejected_hashes); - crypto_signer_->sign(*block); - log_->info("Created block: {}", *block); - return block; } - rxcpp::observable Simulator::onBlock() { - return block_notifier_.get_observable(); + BlockCreatorEvent Simulator::processVerifiedProposal( + VerifiedProposalCreatorEvent const &event) { + if (event.verified_proposal_result) { + auto verified_proposal_and_errors = getVerifiedProposalUnsafe(event); + auto const &top_block_info = event.ledger_state->top_block_info; + auto const &proposal = verified_proposal_and_errors->verified_proposal; + if (proposal) { + log_->info("process verified proposal: {}", *proposal); + } else { + log_->info("process verified proposal: no proposal"); + } + std::vector rejected_hashes; + rejected_hashes.reserve( + verified_proposal_and_errors->rejected_transactions.size()); + for (const auto &rejected_tx : + verified_proposal_and_errors->rejected_transactions) { + rejected_hashes.push_back(rejected_tx.tx_hash); + } + std::shared_ptr block = + block_factory_->unsafeCreateBlock(top_block_info.height + 1, + top_block_info.top_hash, + proposal->createdTime(), + proposal->transactions(), + std::move(rejected_hashes)); + crypto_signer_->sign(*block); + log_->info("Created block: {}", *block); + return BlockCreatorEvent{ + RoundData{verified_proposal_and_errors->verified_proposal, block}, + event.round, + event.ledger_state}; + } else { + return BlockCreatorEvent{boost::none, event.round, event.ledger_state}; + } } } // namespace simulator diff --git a/irohad/simulator/impl/simulator.hpp b/irohad/simulator/impl/simulator.hpp index 1af966f455e..e153529e9f2 100644 --- a/irohad/simulator/impl/simulator.hpp +++ b/irohad/simulator/impl/simulator.hpp @@ -9,8 +9,6 @@ #include "simulator/block_creator.hpp" #include "simulator/verified_proposal_creator.hpp" -#include -#include #include "ametsuchi/temporary_factory.hpp" #include "cryptography/crypto_provider/abstract_crypto_model_signer.hpp" 
#include "interfaces/iroha_internal/unsafe_block_factory.hpp" @@ -34,7 +32,6 @@ namespace iroha { // TODO IR-598 mboldyrev 2019.08.10: remove command_executor from // Simulator std::unique_ptr command_executor, - std::shared_ptr ordering_gate, std::shared_ptr statefulValidator, std::shared_ptr factory, std::shared_ptr crypto_signer, @@ -42,34 +39,16 @@ namespace iroha { block_factory, logger::LoggerPtr log); - ~Simulator() override; + VerifiedProposalCreatorEvent processProposal( + network::OrderingEvent const &event) override; - std::shared_ptr processProposal( - const shared_model::interface::Proposal &proposal) override; - - rxcpp::observable onVerifiedProposal() - override; - - boost::optional> - processVerifiedProposal( - const std::shared_ptr - &verified_proposal_and_errors, - const TopBlockInfo &top_block_info) override; - - rxcpp::observable onBlock() override; + BlockCreatorEvent processVerifiedProposal( + VerifiedProposalCreatorEvent const &event) override; private: // internal std::shared_ptr command_executor_; - rxcpp::composite_subscription notifier_lifetime_; - rxcpp::subjects::subject notifier_; - rxcpp::composite_subscription block_notifier_lifetime_; - rxcpp::subjects::subject block_notifier_; - - rxcpp::composite_subscription proposal_subscription_; - rxcpp::composite_subscription verified_proposal_subscription_; - std::shared_ptr validator_; std::shared_ptr ametsuchi_factory_; std::shared_ptr crypto_signer_; diff --git a/irohad/simulator/verified_proposal_creator.hpp b/irohad/simulator/verified_proposal_creator.hpp index a94005e4165..bebebde0729 100644 --- a/irohad/simulator/verified_proposal_creator.hpp +++ b/irohad/simulator/verified_proposal_creator.hpp @@ -6,20 +6,12 @@ #ifndef IROHA_VERIFIED_PROPOSAL_CREATOR_HPP #define IROHA_VERIFIED_PROPOSAL_CREATOR_HPP -#include #include "simulator/verified_proposal_creator_common.hpp" -namespace shared_model { - namespace interface { - class Proposal; - } // namespace interface -} // namespace 
shared_model - namespace iroha { - namespace consensus { - struct Round; - } // namespace consensus - + namespace network { + struct OrderingEvent; + } namespace simulator { /** @@ -30,14 +22,8 @@ namespace iroha { /** * Execute stateful validation for given proposal */ - virtual std::shared_ptr - processProposal(const shared_model::interface::Proposal &proposal) = 0; - - /** - * Emit proposals which were verified by stateful validator - */ - virtual rxcpp::observable - onVerifiedProposal() = 0; + virtual VerifiedProposalCreatorEvent processProposal( + network::OrderingEvent const &event) = 0; virtual ~VerifiedProposalCreator() = default; }; diff --git a/irohad/subscription/async_dispatcher_impl.hpp b/irohad/subscription/async_dispatcher_impl.hpp new file mode 100644 index 00000000000..5b0d3f16bba --- /dev/null +++ b/irohad/subscription/async_dispatcher_impl.hpp @@ -0,0 +1,157 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_SUBSCRIPTION_ASYNC_DISPATCHER_IMPL_HPP +#define IROHA_SUBSCRIPTION_ASYNC_DISPATCHER_IMPL_HPP + +#include "subscription/dispatcher.hpp" + +#include "common/common.hpp" +#include "subscription/thread_handler.hpp" + +namespace iroha::subscription { + + template + class AsyncDispatcher final : public IDispatcher, + utils::NoCopy, + utils::NoMove { + public: + static constexpr uint32_t kHandlersCount = kCount; + static constexpr uint32_t kPoolThreadsCount = kPoolSize; + + private: + using Parent = IDispatcher; + + struct SchedulerContext { + /// Scheduler to execute tasks + std::shared_ptr handler; + + /// Shows if this handler is static or if it was created to + /// execute a single task and should be deleted after performing it + bool is_temporary; + }; + + SchedulerContext handlers_[kHandlersCount]; + SchedulerContext pool_[kPoolThreadsCount]; + + std::atomic_int64_t temporary_handlers_tasks_counter_; + std::atomic is_disposed_; + + struct BoundContexts { + typename 
Parent::Tid next_tid_offset = 0u; + std::unordered_map contexts; + }; + utils::ReadWriteObject bound_; + + inline SchedulerContext findHandler(typename Parent::Tid const tid) { + if (tid < kHandlersCount) + return handlers_[tid]; + + if (auto context = + bound_.sharedAccess([tid](BoundContexts const &bound) + -> std::optional { + if (auto it = bound.contexts.find(tid); + it != bound.contexts.end()) + return it->second; + return std::nullopt; + })) + return *context; + + for (auto &handler : pool_) + if (!handler.handler->isBusy()) + return handler; + + return SchedulerContext{ + std::make_shared(), + true // temporary + }; + } + + public: + AsyncDispatcher() { + temporary_handlers_tasks_counter_.store(0); + is_disposed_ = false; + for (auto &h : handlers_) { + h.handler = std::make_shared(); + h.is_temporary = false; + } + for (auto &h : pool_) { + h.handler = std::make_shared(); + h.is_temporary = false; + } + } + + void dispose() override { + is_disposed_ = true; + for (auto &h : handlers_) h.handler->dispose(); + for (auto &h : pool_) h.handler->dispose(); + + while (temporary_handlers_tasks_counter_.load() != 0) + std::this_thread::sleep_for(std::chrono::microseconds(0ull)); + } + + void add(typename Parent::Tid tid, typename Parent::Task &&task) override { + if (is_disposed_.load()) + return; + + auto h = findHandler(tid); + if (!h.is_temporary) + h.handler->add(std::move(task)); + else { + ++temporary_handlers_tasks_counter_; + h.handler->add([this, h, task{std::move(task)}]() mutable { + if (!is_disposed_.load()) + task(); + --temporary_handlers_tasks_counter_; + h.handler->dispose(false); + }); + } + } + + void addDelayed(typename Parent::Tid tid, + std::chrono::microseconds timeout, + typename Parent::Task &&task) override { + if (is_disposed_.load()) + return; + + auto h = findHandler(tid); + if (!h.is_temporary) + h.handler->addDelayed(timeout, std::move(task)); + else { + ++temporary_handlers_tasks_counter_; + h.handler->addDelayed(timeout, + [this, h, 
task{std::move(task)}]() mutable { + if (!is_disposed_.load()) + task(); + --temporary_handlers_tasks_counter_; + h.handler->dispose(false); + }); + } + } + + std::optional bind(std::shared_ptr scheduler) override { + if (!scheduler) + return std::nullopt; + + return bound_.exclusiveAccess( + [scheduler(std::move(scheduler))](BoundContexts &bound) { + auto const execution_tid = kHandlersCount + bound.next_tid_offset; + assert(bound.contexts.find(execution_tid) == bound.contexts.end()); + bound.contexts[execution_tid] = SchedulerContext{scheduler, false}; + ++bound.next_tid_offset; + return execution_tid; + }); + } + + bool unbind(Tid tid) override { + return bound_.exclusiveAccess([tid](BoundContexts &bound) { + return bound.contexts.erase(tid) == 1; + }); + } + }; + +} // namespace iroha::subscription + +#endif // IROHA_SUBSCRIPTION_ASYNC_DISPATCHER_IMPL_HPP diff --git a/irohad/subscription/dispatcher.hpp b/irohad/subscription/dispatcher.hpp new file mode 100644 index 00000000000..2ce73a4f933 --- /dev/null +++ b/irohad/subscription/dispatcher.hpp @@ -0,0 +1,35 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_SUBSCRIPTION_DISPATCHER_HPP +#define IROHA_SUBSCRIPTION_DISPATCHER_HPP + +#include + +#include "common/common.hpp" +#include "subscription/scheduler.hpp" + +namespace iroha::subscription { + + struct IDispatcher { + using Tid = uint32_t; + using Task = IScheduler::Task; + static constexpr Tid kExecuteInPool = std::numeric_limits::max(); + + virtual ~IDispatcher() {} + + virtual std::optional bind(std::shared_ptr scheduler) = 0; + virtual bool unbind(Tid tid) = 0; + + virtual void dispose() = 0; + virtual void add(Tid tid, Task &&task) = 0; + virtual void addDelayed(Tid tid, + std::chrono::microseconds timeout, + Task &&task) = 0; + }; + +} // namespace iroha::subscription + +#endif // IROHA_SUBSCRIPTION_DISPATCHER_HPP diff --git a/irohad/subscription/scheduler.hpp b/irohad/subscription/scheduler.hpp new file mode 100644 index 00000000000..6bb62538d86 --- /dev/null +++ b/irohad/subscription/scheduler.hpp @@ -0,0 +1,35 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_SUBSCRIPTION_SCHEDULER_HPP +#define IROHA_SUBSCRIPTION_SCHEDULER_HPP + +#include + +#include "common/common.hpp" + +namespace iroha::subscription { + + class IScheduler { + public: + using Task = std::function; + virtual ~IScheduler() {} + + /// Stops sheduler work and tasks execution + virtual void dispose(bool wait_for_release = true) = 0; + + /// Checks if current scheduler executes task + virtual bool isBusy() const = 0; + + /// Adds task to execution queue + virtual void add(Task &&t) = 0; + + /// Adds delayed task to execution queue + virtual void addDelayed(std::chrono::microseconds timeout, Task &&t) = 0; + }; + +} // namespace iroha::subscription + +#endif // IROHA_SUBSCRIPTION_SCHEDULER_HPP diff --git a/irohad/subscription/scheduler_impl.hpp b/irohad/subscription/scheduler_impl.hpp new file mode 100644 index 00000000000..4a270fe97d0 --- /dev/null +++ b/irohad/subscription/scheduler_impl.hpp @@ -0,0 +1,169 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_SUBSCRIPTION_SCHEDULER_IMPL_HPP +#define IROHA_SUBSCRIPTION_SCHEDULER_IMPL_HPP + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "subscription/scheduler.hpp" + +#include "common/common.hpp" + +/** + * If you need to execute task, that was made in this thread and want to be + * executed in the same thread without delay - you need to uncomment this define + */ +//#define SE_SYNC_CALL_IF_SAME_THREAD + +namespace iroha::subscription { + + class SchedulerBase : public IScheduler, utils::NoCopy, utils::NoMove { + private: + using Time = std::chrono::high_resolution_clock; + using Timepoint = std::chrono::time_point