diff --git a/.changelog/v0.9.0/bug-fixes/671-add-consensus-commit-timeout.md b/.changelog/v0.9.0/bug-fixes/671-add-consensus-commit-timeout.md new file mode 100644 index 00000000000..6c058a9904d --- /dev/null +++ b/.changelog/v0.9.0/bug-fixes/671-add-consensus-commit-timeout.md @@ -0,0 +1,2 @@ +- Add back consensus commit timeout configuration set in tendermint + ([#671](https://github.com/anoma/namada/pull/671)) \ No newline at end of file diff --git a/.changelog/v0.9.0/bug-fixes/702-fix-default-node-logging.md b/.changelog/v0.9.0/bug-fixes/702-fix-default-node-logging.md new file mode 100644 index 00000000000..146815a45df --- /dev/null +++ b/.changelog/v0.9.0/bug-fixes/702-fix-default-node-logging.md @@ -0,0 +1,2 @@ +- Fix info logs to show by default for namadan + ([#702](https://github.com/anoma/namada/pull/702)) \ No newline at end of file diff --git a/.changelog/v0.9.0/features/658-add-client-block-query.md b/.changelog/v0.9.0/features/658-add-client-block-query.md new file mode 100644 index 00000000000..29584d3a0fc --- /dev/null +++ b/.changelog/v0.9.0/features/658-add-client-block-query.md @@ -0,0 +1,2 @@ +- Client: Add a command to query the last committed block's hash, height and + timestamp. ([#658](https://github.com/anoma/namada/issues/658)) diff --git a/.changelog/v0.9.0/improvements/553-rpc-queries-router.md b/.changelog/v0.9.0/improvements/553-rpc-queries-router.md new file mode 100644 index 00000000000..877ac77c20e --- /dev/null +++ b/.changelog/v0.9.0/improvements/553-rpc-queries-router.md @@ -0,0 +1,4 @@ +- Replace the handcrafted RPC paths with a new `router!` macro RPC queries + definition that handles dynamic path matching, type-safe handler function + dispatch and also generates type-safe client methods for the queries. + ([#553](https://github.com/anoma/namada/pull/553)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/569-rpc-sub-shell.md b/.changelog/v0.9.0/improvements/569-rpc-sub-shell.md new file mode 100644 index 00000000000..96f0a8bd3b5 --- /dev/null +++ b/.changelog/v0.9.0/improvements/569-rpc-sub-shell.md @@ -0,0 +1,2 @@ +- Move all shell RPC endpoints under the /shell path. This is a breaking change + to RPC consumers. ([#569](https://github.com/anoma/namada/pull/569)) \ No newline at end of file diff --git a/.changelog/v0.9.0/miscellaneous/632-xan-to-nam.md b/.changelog/v0.9.0/miscellaneous/632-xan-to-nam.md new file mode 100644 index 00000000000..8f9f2c155c8 --- /dev/null +++ b/.changelog/v0.9.0/miscellaneous/632-xan-to-nam.md @@ -0,0 +1,2 @@ +- Renamed native token from XAN to NAM + ([#632](https://github.com/anoma/namada/pull/632)) \ No newline at end of file diff --git a/.changelog/v0.9.0/summary.md b/.changelog/v0.9.0/summary.md new file mode 100644 index 00000000000..9e2ad72e5c5 --- /dev/null +++ b/.changelog/v0.9.0/summary.md @@ -0,0 +1 @@ +Namada 0.9.0 is a scheduled minor release. 
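The `router!` (#553) and `/shell` path (#569) entries above correspond to the client rewrite further down in this diff, where handcrafted `Path`-based `abci_query` calls give way to macro-generated, type-safe client methods. A minimal sketch of the new call style, based on the `RPC.shell().epoch(&client)` usage visible in `apps/src/lib/client/rpc.rs` below; the node address and the `tokio` entry point are illustrative assumptions, not part of this PR:

```rust
use namada::ledger::queries::RPC;
use tendermint_rpc::HttpClient;

#[tokio::main]
async fn main() {
    // Hypothetical local node; any reachable Tendermint RPC address works.
    let client = HttpClient::new("http://127.0.0.1:26657").unwrap();
    // Macro-generated, type-safe client method replacing the old
    // `abci_query(Some(Path::Epoch.into()), ...)` round-trip; the query
    // is now served under the `/shell` path prefix.
    match RPC.shell().epoch(&client).await {
        Ok(epoch) => println!("Last committed epoch: {}", epoch),
        Err(err) => eprintln!("Error in the query {}", err),
    }
}
```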
diff --git a/.github/workflows/build-and-test-bridge.yml b/.github/workflows/build-and-test-bridge.yml index cee43b941b1..0ed0437ce4e 100644 --- a/.github/workflows/build-and-test-bridge.yml +++ b/.github/workflows/build-and-test-bridge.yml @@ -109,12 +109,13 @@ jobs: matrix: os: [ubuntu-latest] nightly_version: [nightly-2022-05-20] + mold_version: [1.6.0] make: - name: ABCI suffix: '' cache_key: anoma cache_version: v1 - wait_for: anoma-release-eth (ubuntu-latest, ABCI Release build, anoma-e2e-release, v1) + wait_for: anoma-release-eth (ubuntu-latest, 1.6.0, ABCI Release build, anoma-e2e-release, v1) tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 env: @@ -177,10 +178,18 @@ restore-keys: ${{ runner.os }}-${{ matrix.make.cache_key }}-${{ matrix.make.cache_version }}-cargo- - name: Start sccache server run: sccache --start-server + - name: Install mold linker + run: | + wget -q -O- https://github.com/rui314/mold/releases/download/v${{ matrix.mold_version }}/mold-${{ matrix.mold_version }}-x86_64-linux.tar.gz | tar -xz + mv mold-${{ matrix.mold_version }}-x86_64-linux/bin/mold /usr/local/bin - name: Build run: make build${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Build test run: make build-test${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Download wasm artifacts uses: actions/download-artifact@v3 with: @@ -188,8 +197,10 @@ path: ./wasm - name: Run unit test run: make test-unit${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Wait for release binaries - uses: lewagon/wait-on-check-action@master + uses: lewagon/wait-on-check-action@v1.2.0 with: ref: ${{ github.event.pull_request.head.sha || github.ref }} check-name: ${{ matrix.make.wait_for }} @@ -228,10 +239,11 @@ ANOMA_TENDERMINT_WEBSOCKET_TIMEOUT: 20 ANOMA_E2E_USE_PREBUILT_BINARIES: "true" ANOMA_E2E_KEEP_TEMP: "true" - ENV_VAR_TM_STDOUT: "false" + ANOMA_TM_STDOUT: "false" ANOMA_LOG_COLOR: "false" ANOMA_MASP_PARAMS_DIR: "/home/runner/work/masp" ANOMA_LOG: "info" + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Upload e2e logs if: success() || failure() uses: actions/upload-artifact@v3 with: @@ -240,7 +252,7 @@ path: | /tmp/.*/logs/ /tmp/.*/e2e-test.*/setup/validator-*/.anoma/logs/*.log - retention-days: 5 + retention-days: 3 - name: Print sccache stats if: always() run: sccache --show-stats @@ -255,6 +267,7 @@ fail-fast: false matrix: os: [ubuntu-latest] + mold_version: [1.6.0] make: - name: ABCI Release build suffix: '' @@ -316,8 +329,14 @@ restore-keys: ${{ runner.os }}-${{ matrix.make.cache_key }}-${{ matrix.make.cache_version }}-cargo- - name: Start sccache server run: sccache --start-server + - name: Install mold linker + run: | + wget -q -O- https://github.com/rui314/mold/releases/download/v${{ matrix.mold_version }}/mold-${{ matrix.mold_version }}-x86_64-linux.tar.gz | tar -xz + mv mold-${{ matrix.mold_version }}-x86_64-linux/bin/mold /usr/local/bin - name: Build run: make build-release${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Upload target binaries uses: actions/upload-artifact@v3 with: diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 1c4cbd34127..6ee13b06f31 100644 --- a/.github/workflows/build-and-test.yml +++ 
b/.github/workflows/build-and-test.yml @@ -111,12 +111,13 @@ jobs: matrix: os: [ubuntu-latest] nightly_version: [nightly-2022-05-20] + mold_version: [1.6.0] make: - name: ABCI suffix: '' cache_key: anoma cache_version: v1 - wait_for: anoma-release (ubuntu-latest, ABCI Release build, anoma-e2e-release, v1) + wait_for: anoma-release (ubuntu-latest, 1.6.0, ABCI Release build, anoma-e2e-release, v1) tendermint_artifact: tendermint-unreleased-ad825dcadbd4b98c3f91ce5a711e4fb36a69c377 env: @@ -179,10 +180,18 @@ restore-keys: ${{ runner.os }}-${{ matrix.make.cache_key }}-${{ matrix.make.cache_version }}-cargo- - name: Start sccache server run: sccache --start-server + - name: Install mold linker + run: | + wget -q -O- https://github.com/rui314/mold/releases/download/v${{ matrix.mold_version }}/mold-${{ matrix.mold_version }}-x86_64-linux.tar.gz | tar -xz + mv mold-${{ matrix.mold_version }}-x86_64-linux/bin/mold /usr/local/bin - name: Build run: make build${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Build test run: make build-test${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Download wasm artifacts uses: actions/download-artifact@v3 with: @@ -190,8 +199,10 @@ path: ./wasm - name: Run unit test run: make test-unit${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Wait for release binaries - uses: lewagon/wait-on-check-action@master + uses: lewagon/wait-on-check-action@v1.2.0 with: ref: ${{ github.event.pull_request.head.sha || github.ref }} check-name: ${{ matrix.make.wait_for }} @@ -230,10 +241,11 @@ ANOMA_TENDERMINT_WEBSOCKET_TIMEOUT: 20 ANOMA_E2E_USE_PREBUILT_BINARIES: "true" ANOMA_E2E_KEEP_TEMP: "true" - ENV_VAR_TM_STDOUT: "false" + ANOMA_TM_STDOUT: "false" ANOMA_LOG_COLOR: "false" ANOMA_MASP_PARAMS_DIR: "/home/runner/work/masp" ANOMA_LOG: "info" + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Upload e2e logs if: success() || failure() uses: actions/upload-artifact@v3 with: @@ -257,6 +269,7 @@ fail-fast: false matrix: os: [ubuntu-latest] + mold_version: [1.6.0] make: - name: ABCI Release build suffix: '' @@ -316,10 +329,16 @@ ~/.cargo/git key: ${{ runner.os }}-${{ matrix.make.cache_key }}-${{ matrix.make.cache_version }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: ${{ runner.os }}-${{ matrix.make.cache_key }}-${{ matrix.make.cache_version }}-cargo- + - name: Install mold linker + run: | + wget -q -O- https://github.com/rui314/mold/releases/download/v${{ matrix.mold_version }}/mold-${{ matrix.mold_version }}-x86_64-linux.tar.gz | tar -xz + mv mold-${{ matrix.mold_version }}-x86_64-linux/bin/mold /usr/local/bin - name: Start sccache server run: sccache --start-server - name: Build run: make build-release${{ matrix.make.suffix }} + env: + RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold" - name: Upload target binaries uses: actions/upload-artifact@v3 with: diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0ba9f39b985..5304af902e1 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -41,18 +41,21 @@ jobs: command: cd documentation/specs && mdbook build cache_subkey: specs cache_version: v1 + distribution_id: E2Y9R2H4P5LYED - name: Build docs folder: documentation/docs bucket: namada-docs-static-website command: cd documentation/docs && mdbook build 
cache_subkey: docs cache_version: v1 + distribution_id: E2T9UML53913RV - name: Build development docs folder: documentation/dev bucket: namada-dev-static-website command: cargo run --bin namada_encoding_spec && cd documentation/dev && mdbook build cache_subkey: dev cache_version: v1 + distribution_id: E6XPP5KFWXJFQ env: CARGO_INCREMENTAL: 0 @@ -127,6 +130,9 @@ - name: Publish docs if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} run: aws s3 sync ${{ matrix.make.folder }}/book/html/ s3://${{ matrix.make.bucket }} --region eu-west-1 --delete + - name: Invalidate distribution cache + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + run: aws cloudfront create-invalidation --distribution-id ${{ matrix.make.distribution_id }} --paths "/*" - name: Print sccache stats if: always() run: sccache --show-stats diff --git a/CHANGELOG.md b/CHANGELOG.md index b7095801d5c..e4cd01a1275 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,35 @@ # CHANGELOG +## v0.9.0 + +Namada 0.9.0 is a scheduled minor release. + +### BUG FIXES + +- Add back consensus commit timeout configuration set in tendermint + ([#671](https://github.com/anoma/namada/pull/671)) +- Fix info logs to show by default for namadan + ([#702](https://github.com/anoma/namada/pull/702)) + +### FEATURES + +- Client: Add a command to query the last committed block's hash, height and + timestamp. ([#658](https://github.com/anoma/namada/issues/658)) + +### IMPROVEMENTS + +- Replace the handcrafted RPC paths with a new `router!` macro RPC queries + definition that handles dynamic path matching, type-safe handler function + dispatch and also generates type-safe client methods for the queries. + ([#553](https://github.com/anoma/namada/pull/553)) +- Move all shell RPC endpoints under the /shell path. This is a breaking change + to RPC consumers. 
([#569](https://github.com/anoma/namada/pull/569)) + +### MISCELLANEOUS + +- Renamed native token from XAN to NAM + ([#632](https://github.com/anoma/namada/pull/632)) + ## v0.8.1 Namada 0.8.1 is a point release focused on standardizing Tendermint diff --git a/Cargo.lock b/Cargo.lock index e081e3e4bd9..30ab6e26554 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3313,12 +3313,13 @@ dependencies = [ [[package]] name = "namada" -version = "0.8.1" +version = "0.9.0" dependencies = [ "ark-bls12-381", "ark-ec", "ark-serialize", "assert_matches", + "async-trait", "bech32", "borsh", "byte-unit", @@ -3344,6 +3345,7 @@ dependencies = [ "namada_proof_of_stake", "num-rational 0.4.1", "parity-wasm", + "paste", "pretty_assertions", "proptest", "prost", @@ -3351,6 +3353,7 @@ dependencies = [ "pwasm-utils", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde 1.0.145", "serde_json", @@ -3361,9 +3364,12 @@ dependencies = [ "tendermint 0.23.6", "tendermint-proto 0.23.5", "tendermint-proto 0.23.6", + "tendermint-rpc 0.23.5", + "tendermint-rpc 0.23.6", "test-log", "thiserror", "tiny-keccak", + "tokio", "tonic-build", "tracing 0.1.37", "tracing-subscriber 0.3.16", @@ -3379,7 +3385,7 @@ [[package]] name = "namada_apps" -version = "0.8.1" +version = "0.9.0" dependencies = [ "ark-serialize", "ark-std", @@ -3472,7 +3478,7 @@ dependencies = [ [[package]] name = "namada_encoding_spec" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "itertools", @@ -3483,7 +3489,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.8.1" +version = "0.9.0" dependencies = [ "quote", "syn", @@ -3491,7 +3497,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "derivative", @@ -3501,7 +3507,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.8.1" +version = "0.9.0" dependencies = [ "assert_cmd", "borsh", @@ -3535,7 +3541,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -3547,7 +3553,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -3555,7 +3561,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -4618,9 +4624,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ "autocfg 1.1.0", "crossbeam-deque", @@ -5655,7 +5661,7 @@ dependencies = [ [[package]] name = "tendermint" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "async-trait", "bytes 1.2.1", @@ -5696,7 +5702,7 @@ dependencies = [ [[package]] name = "tendermint-config" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = 
"git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "flex-error", "serde 1.0.145", @@ -5722,7 +5728,7 @@ dependencies = [ [[package]] name = "tendermint-light-client-verifier" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "derive_more", "flex-error", @@ -5751,7 +5757,7 @@ dependencies = [ [[package]] name = "tendermint-proto" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "bytes 1.2.1", "flex-error", @@ -5801,7 +5807,7 @@ dependencies = [ [[package]] name = "tendermint-rpc" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "async-trait", "async-tungstenite", @@ -5849,7 +5855,7 @@ dependencies = [ [[package]] name = "tendermint-testgen" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "ed25519-dalek", "gumdrop", diff --git a/Cargo.toml b/Cargo.toml index 4d635125501..5cfa43de021 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,13 +34,13 @@ async-process = {git = "https://github.com/heliaxdev/async-process.git", rev = " # borsh-derive-internal = {path = "../borsh-rs/borsh-derive-internal"} # borsh-schema-derive-internal = {path = "../borsh-rs/borsh-schema-derive-internal"} -# patched to a commit on the `eth-bridge-integration` branch of our fork -tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-config = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} +# patched to a commit on the `eth-bridge-integration+consensus-timeout` branch of our fork +tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-config = 
{git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-rpc = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} # patched to a commit on the `eth-bridge-integration` branch of our fork ibc = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} diff --git a/README.md b/README.md index 6888f89e879..b96cb34229e 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,8 @@ ## Overview -[Namada](http://namada.net) is a sovereign proof-of-stake blockchain, using Tendermint BFT -consensus, that enables multi-asset shielded transfers for any native +[Namada](http://namada.net) is a Proof-of-Stake L1 for interchain asset-agnostic privacy. Namada uses Tendermint BFT +consensus and enables multi-asset shielded transfers for any native or non-native asset. Namada features full IBC protocol support, a natively integrated Ethereum bridge, a modern proof-of-stake system with automatic reward compounding and cubic slashing, and a diff --git a/apps/Cargo.toml b/apps/Cargo.toml index 3aaa849adef..3a0e59c4954 100644 --- a/apps/Cargo.toml +++ b/apps/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_apps" readme = "../README.md" resolver = "2" -version = "0.8.1" +version = "0.9.0" default-run = "namada" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -64,7 +64,7 @@ abciplus = [ ] [dependencies] -namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "secp256k1-sign-verify"]} +namada = {path = "../shared", features = ["wasm-runtime", "ferveo-tpke", "rand", "tendermint-rpc", "secp256k1-sign-verify"]} ark-serialize = "0.3.0" ark-std = "0.3.0" # branch = "bat/arse-merkle-tree" @@ -107,7 +107,7 @@ prost = "0.9.0" prost-types = "0.9.0" rand = {version = "0.8", default-features = false} rand_core = {version = "0.6", default-features = false} -rayon = "=1.5.1" +rayon = "=1.5.3" regex = "1.4.5" reqwest = "0.11.4" rlimit = "0.5.4" diff --git a/apps/src/bin/anoma-client/cli.rs b/apps/src/bin/anoma-client/cli.rs index b87cdb5c661..841e4b56ae0 100644 --- a/apps/src/bin/anoma-client/cli.rs +++ b/apps/src/bin/anoma-client/cli.rs @@ -27,12 +27,6 @@ pub async fn main() -> Result<()> { Sub::TxInitValidator(TxInitValidator(args)) => { tx::submit_init_validator(ctx, args).await; } - Sub::TxInitNft(TxInitNft(args)) => { - tx::submit_init_nft(ctx, args).await; - } - Sub::TxMintNft(TxMintNft(args)) => { - tx::submit_mint_nft(ctx, args).await; - } Sub::TxInitProposal(TxInitProposal(args)) => { tx::submit_init_proposal(ctx, args).await; } @@ -52,6 +46,9 @@ pub async fn main() -> Result<()> { Sub::QueryEpoch(QueryEpoch(args)) => { rpc::query_epoch(args).await; } + Sub::QueryBlock(QueryBlock(args)) => { + rpc::query_block(args).await; + } Sub::QueryBalance(QueryBalance(args)) => { rpc::query_balance(ctx, args).await; } diff --git a/apps/src/bin/anoma-node/main.rs b/apps/src/bin/anoma-node/main.rs index 49cbd886c80..b37feb3cdb9 100644 --- 
a/apps/src/bin/anoma-node/main.rs +++ b/apps/src/bin/anoma-node/main.rs @@ -1,18 +1,15 @@ mod cli; -use std::str::FromStr; - use color_eyre::eyre::Result; use namada_apps::logging; -use tracing_subscriber::filter::Directive; +use tracing_subscriber::filter::LevelFilter; fn main() -> Result<()> { // init error reporting color_eyre::install()?; // init logging - let default_directive = Directive::from_str("anoma=info")?; - logging::init_from_env_or(default_directive)?; + logging::init_from_env_or(LevelFilter::INFO)?; // run the CLI cli::main() diff --git a/apps/src/bin/anoma/cli.rs b/apps/src/bin/anoma/cli.rs index ccde0c36185..3737f63a8b2 100644 --- a/apps/src/bin/anoma/cli.rs +++ b/apps/src/bin/anoma/cli.rs @@ -46,8 +46,6 @@ fn handle_command(cmd: cli::cmds::Anoma, raw_sub_cmd: String) -> Result<()> { | cli::cmds::Anoma::TxCustom(_) | cli::cmds::Anoma::TxTransfer(_) | cli::cmds::Anoma::TxUpdateVp(_) - | cli::cmds::Anoma::TxInitNft(_) - | cli::cmds::Anoma::TxMintNft(_) | cli::cmds::Anoma::TxInitProposal(_) | cli::cmds::Anoma::TxVoteProposal(_) => { handle_subcommand("namadac", sub_args) diff --git a/apps/src/lib/cli.rs b/apps/src/lib/cli.rs index ad90f1d2cb7..3c9374bcb3c 100644 --- a/apps/src/lib/cli.rs +++ b/apps/src/lib/cli.rs @@ -47,8 +47,6 @@ pub mod cmds { TxCustom(TxCustom), TxTransfer(TxTransfer), TxUpdateVp(TxUpdateVp), - TxInitNft(TxInitNft), - TxMintNft(TxMintNft), TxInitProposal(TxInitProposal), TxVoteProposal(TxVoteProposal), } @@ -62,8 +60,6 @@ pub mod cmds { .subcommand(TxCustom::def()) .subcommand(TxTransfer::def()) .subcommand(TxUpdateVp::def()) - .subcommand(TxInitNft::def()) - .subcommand(TxMintNft::def()) .subcommand(TxInitProposal::def()) .subcommand(TxVoteProposal::def()) } @@ -76,8 +72,6 @@ pub mod cmds { let tx_custom = SubCmd::parse(matches).map(Self::TxCustom); let tx_transfer = SubCmd::parse(matches).map(Self::TxTransfer); let tx_update_vp = SubCmd::parse(matches).map(Self::TxUpdateVp); - let tx_nft_create = SubCmd::parse(matches).map(Self::TxInitNft); - let tx_nft_mint = SubCmd::parse(matches).map(Self::TxMintNft); let tx_init_proposal = SubCmd::parse(matches).map(Self::TxInitProposal); let tx_vote_proposal = @@ -88,8 +82,6 @@ pub mod cmds { .or(tx_custom) .or(tx_transfer) .or(tx_update_vp) - .or(tx_nft_create) - .or(tx_nft_mint) .or(tx_init_proposal) .or(tx_vote_proposal) } @@ -155,9 +147,6 @@ pub mod cmds { .subcommand(TxUpdateVp::def().display_order(1)) .subcommand(TxInitAccount::def().display_order(1)) .subcommand(TxInitValidator::def().display_order(1)) - // Nft transactions - .subcommand(TxInitNft::def().display_order(1)) - .subcommand(TxMintNft::def().display_order(1)) // Proposal transactions .subcommand(TxInitProposal::def().display_order(1)) .subcommand(TxVoteProposal::def().display_order(1)) @@ -167,6 +156,7 @@ pub mod cmds { .subcommand(Withdraw::def().display_order(2)) // Queries .subcommand(QueryEpoch::def().display_order(3)) + .subcommand(QueryBlock::def().display_order(3)) .subcommand(QueryBalance::def().display_order(3)) .subcommand(QueryBonds::def().display_order(3)) .subcommand(QueryVotingPower::def().display_order(3)) @@ -188,8 +178,6 @@ pub mod cmds { let tx_init_account = Self::parse_with_ctx(matches, TxInitAccount); let tx_init_validator = Self::parse_with_ctx(matches, TxInitValidator); - let tx_nft_create = Self::parse_with_ctx(matches, TxInitNft); - let tx_nft_mint = Self::parse_with_ctx(matches, TxMintNft); let tx_init_proposal = Self::parse_with_ctx(matches, TxInitProposal); let tx_vote_proposal = @@ -198,6 +186,7 @@ pub mod cmds { let 
unbond = Self::parse_with_ctx(matches, Unbond); let withdraw = Self::parse_with_ctx(matches, Withdraw); let query_epoch = Self::parse_with_ctx(matches, QueryEpoch); + let query_block = Self::parse_with_ctx(matches, QueryBlock); let query_balance = Self::parse_with_ctx(matches, QueryBalance); let query_bonds = Self::parse_with_ctx(matches, QueryBonds); let query_voting_power = @@ -216,14 +205,13 @@ pub mod cmds { .or(tx_update_vp) .or(tx_init_account) .or(tx_init_validator) - .or(tx_nft_create) - .or(tx_nft_mint) .or(tx_init_proposal) .or(tx_vote_proposal) .or(bond) .or(unbond) .or(withdraw) .or(query_epoch) + .or(query_block) .or(query_balance) .or(query_bonds) .or(query_voting_power) @@ -275,14 +263,13 @@ pub mod cmds { TxUpdateVp(TxUpdateVp), TxInitAccount(TxInitAccount), TxInitValidator(TxInitValidator), - TxInitNft(TxInitNft), - TxMintNft(TxMintNft), TxInitProposal(TxInitProposal), TxVoteProposal(TxVoteProposal), Bond(Bond), Unbond(Unbond), Withdraw(Withdraw), QueryEpoch(QueryEpoch), + QueryBlock(QueryBlock), QueryBalance(QueryBalance), QueryBonds(QueryBonds), QueryVotingPower(QueryVotingPower), @@ -936,6 +923,25 @@ pub mod cmds { } } + #[derive(Clone, Debug)] + pub struct QueryBlock(pub args::Query); + + impl SubCmd for QueryBlock { + const CMD: &'static str = "block"; + + fn parse(matches: &ArgMatches) -> Option { + matches + .subcommand_matches(Self::CMD) + .map(|matches| QueryBlock(args::Query::parse(matches))) + } + + fn def() -> App { + App::new(Self::CMD) + .about("Query the last committed block.") + .add_args::() + } + } + #[derive(Clone, Debug)] pub struct QueryBalance(pub args::QueryBalance); @@ -1034,50 +1040,6 @@ pub mod cmds { } } - #[derive(Clone, Debug)] - pub struct TxInitNft(pub args::NftCreate); - - impl SubCmd for TxInitNft { - const CMD: &'static str = "init-nft"; - - fn parse(matches: &ArgMatches) -> Option - where - Self: Sized, - { - matches - .subcommand_matches(Self::CMD) - .map(|matches| TxInitNft(args::NftCreate::parse(matches))) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Create a new NFT.") - .add_args::() - } - } - - #[derive(Clone, Debug)] - pub struct TxMintNft(pub args::NftMint); - - impl SubCmd for TxMintNft { - const CMD: &'static str = "mint-nft"; - - fn parse(matches: &ArgMatches) -> Option - where - Self: Sized, - { - matches - .subcommand_matches(Self::CMD) - .map(|matches| TxMintNft(args::NftMint::parse(matches))) - } - - fn def() -> App { - App::new(Self::CMD) - .about("Mint new NFT tokens.") - .add_args::() - } - } - #[derive(Clone, Debug)] pub struct TxInitProposal(pub args::InitProposal); @@ -1296,7 +1258,7 @@ pub mod args { const FEE_AMOUNT: ArgDefault = arg_default("fee-amount", DefaultFn(|| token::Amount::from(0))); const FEE_TOKEN: ArgDefaultFromCtx = - arg_default_from_ctx("fee-token", DefaultFn(|| "XAN".into())); + arg_default_from_ctx("fee-token", DefaultFn(|| "NAM".into())); const FORCE: ArgFlag = flag("force"); const DONT_PREFETCH_WASM: ArgFlag = flag("dont-prefetch-wasm"); const GAS_LIMIT: ArgDefault = @@ -1316,7 +1278,6 @@ pub mod args { const LOCALHOST: ArgFlag = flag("localhost"); const MODE: ArgOpt = arg_opt("mode"); const NET_ADDRESS: Arg = arg("net-address"); - const NFT_ADDRESS: Arg
= arg("nft-address"); const OWNER: ArgOpt = arg_opt("owner"); const PROPOSAL_OFFLINE: ArgFlag = flag("offline"); const PROTOCOL_KEY: ArgOpt = arg_opt("protocol-key"); @@ -2009,66 +1970,6 @@ pub mod args { } } - // Transaction to create a new nft - #[derive(Clone, Debug)] - pub struct NftCreate { - /// Common tx argumentsips - pub tx: Tx, - /// Path to the nft file description - pub nft_data: PathBuf, - } - - impl Args for NftCreate { - fn parse(matches: &ArgMatches) -> Self { - let tx = Tx::parse(matches); - let data_path = DATA_PATH.parse(matches); - - Self { - tx, - nft_data: data_path, - } - } - - fn def(app: App) -> App { - app.add_args::() - .arg(DATA_PATH.def().about("The path nft description file.")) - } - } - - #[derive(Clone, Debug)] - pub struct NftMint { - /// Common tx arguments - pub tx: Tx, - /// The nft address - pub nft_address: Address, - /// The nft token description - pub nft_data: PathBuf, - } - - impl Args for NftMint { - fn parse(matches: &ArgMatches) -> Self { - let tx = Tx::parse(matches); - let nft_address = NFT_ADDRESS.parse(matches); - let data_path = DATA_PATH.parse(matches); - - Self { - tx, - nft_address, - nft_data: data_path, - } - } - - fn def(app: App) -> App { - app.add_args::() - .arg(NFT_ADDRESS.def().about("The nft address.")) - .arg( - DATA_PATH.def().about( - "The data path file that describes the nft tokens.", - ), - ) - } - } - /// Query token balance(s) #[derive(Clone, Debug)] pub struct QueryBalance { diff --git a/apps/src/lib/client/rpc.rs b/apps/src/lib/client/rpc.rs index d191fada0f1..6d181085d04 100644 --- a/apps/src/lib/client/rpc.rs +++ b/apps/src/lib/client/rpc.rs @@ -23,6 +23,7 @@ use namada::ledger::pos::types::{ use namada::ledger::pos::{ self, is_validator_slashes_key, BondId, Bonds, PosParams, Slash, Unbonds, }; +use namada::ledger::queries::{self, RPC}; use namada::types::address::Address; use namada::types::governance::{ OfflineProposal, OfflineVote, ProposalResult, ProposalVote, TallyResult, @@ -32,11 +33,11 @@ use namada::types::key::*; use namada::types::storage::{Epoch, Key, KeySeg, PrefixValue}; use namada::types::token::{balance_key, Amount}; use namada::types::{address, storage, token}; +use tendermint::abci::Code; use tokio::time::{Duration, Instant}; use crate::cli::{self, args, Context}; use crate::client::tendermint_rpc_types::TxResponse; -use crate::facade::tendermint::abci::Code; use crate::facade::tendermint_config::net::Address as TendermintAddress; use crate::facade::tendermint_rpc::error::Error as TError; use crate::facade::tendermint_rpc::query::Query; @@ -44,7 +45,6 @@ use crate::facade::tendermint_rpc::{ Client, HttpClient, Order, SubscriptionClient, WebSocketClient, }; use crate::node::ledger::events::Event; -use crate::node::ledger::rpc::Path; /// Query the status of a given transaction. 
/// @@ -123,51 +123,38 @@ pub async fn query_tx_status( /// Query the epoch of the last committed block pub async fn query_epoch(args: args::Query) -> Epoch { let client = HttpClient::new(args.ledger_address).unwrap(); - let path = Path::Epoch; - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match Epoch::try_from_slice(&response.value[..]) { - Ok(epoch) => { - println!("Last committed epoch: {}", epoch); - return epoch; - } + let epoch = unwrap_client_response(RPC.shell().epoch(&client).await); + println!("Last committed epoch: {}", epoch); + epoch +} - Err(err) => { - eprintln!("Error decoding the epoch value: {}", err) - } - }, - Code::Err(err) => eprintln!( - "Error in the query {} (error code {})", - response.info, err - ), - } - cli::safe_exit(1) +/// Query the last committed block +pub async fn query_block( + args: args::Query, +) -> tendermint_rpc::endpoint::block::Response { + let client = HttpClient::new(args.ledger_address).unwrap(); + let response = client.latest_block().await.unwrap(); + println!( + "Last committed block ID: {}, height: {}, time: {}", + response.block_id, + response.block.header.height, + response.block.header.time + ); + response } /// Query the raw bytes of given storage key pub async fn query_raw_bytes(_ctx: Context, args: args::QueryRawBytes) { let client = HttpClient::new(args.query.ledger_address).unwrap(); - let path = Path::Value(args.storage_key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => { - println!("{}", HEXLOWER.encode(&response.value)); - } - Code::Err(err) => { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ); - cli::safe_exit(1) - } + let response = unwrap_client_response( + RPC.shell() + .storage_value(&client, None, None, false, &args.storage_key) + .await, + ); + if !response.data.is_empty() { + println!("Found data: 0x{}", HEXLOWER.encode(&response.data)); + } else { + println!("No data found for key {}", args.storage_key); } } @@ -211,11 +198,9 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { let owner = ctx.get(&owner); for (token, _) in tokens { let prefix = token.to_db_key().into(); - let balances = query_storage_prefix::( - client.clone(), - prefix, - ) - .await; + let balances = + query_storage_prefix::(&client, &prefix) + .await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, Some(&owner)); } @@ -225,7 +210,7 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { let token = ctx.get(&token); let prefix = token.to_db_key().into(); let balances = - query_storage_prefix::(client, prefix).await; + query_storage_prefix::(&client, &prefix).await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, None); } @@ -234,8 +219,7 @@ pub async fn query_balance(ctx: Context, args: args::QueryBalance) { for (token, _) in tokens { let key = token::balance_prefix(&token); let balances = - query_storage_prefix::(client.clone(), key) - .await; + query_storage_prefix::(&client, &key).await; if let Some(balances) = balances { print_balances(&ctx, balances, &token, None); } @@ -736,18 +720,14 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { let owner = ctx.get(&owner); // Find owner's bonds to any validator let bonds_prefix = pos::bonds_for_source_prefix(&owner); - let bonds = query_storage_prefix::( 
- client.clone(), - bonds_prefix, - ) - .await; + let bonds = + query_storage_prefix::(&client, &bonds_prefix) + .await; // Find owner's unbonds to any validator let unbonds_prefix = pos::unbonds_for_source_prefix(&owner); - let unbonds = query_storage_prefix::( - client.clone(), - unbonds_prefix, - ) - .await; + let unbonds = + query_storage_prefix::(&client, &unbonds_prefix) + .await; let mut total: token::Amount = 0.into(); let mut total_active: token::Amount = 0.into(); @@ -856,18 +836,14 @@ pub async fn query_bonds(ctx: Context, args: args::QueryBonds) { (None, None) => { // Find all the bonds let bonds_prefix = pos::bonds_prefix(); - let bonds = query_storage_prefix::( - client.clone(), - bonds_prefix, - ) - .await; + let bonds = + query_storage_prefix::(&client, &bonds_prefix) + .await; // Find all the unbonds let unbonds_prefix = pos::unbonds_prefix(); - let unbonds = query_storage_prefix::( - client.clone(), - unbonds_prefix, - ) - .await; + let unbonds = + query_storage_prefix::(&client, &unbonds_prefix) + .await; let mut total: token::Amount = 0.into(); let mut total_active: token::Amount = 0.into(); @@ -1108,11 +1084,9 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { None => { // Iterate slashes for all validators let slashes_prefix = pos::slashes_prefix(); - let slashes = query_storage_prefix::( - client.clone(), - slashes_prefix, - ) - .await; + let slashes = + query_storage_prefix::(&client, &slashes_prefix) + .await; match slashes { Some(slashes) => { @@ -1151,12 +1125,12 @@ pub async fn query_slashes(ctx: Context, args: args::QuerySlashes) { /// Dry run a transaction pub async fn dry_run_tx(ledger_address: &TendermintAddress, tx_bytes: Vec) { let client = HttpClient::new(ledger_address.clone()).unwrap(); - let path = Path::DryRunTx; - let response = client - .abci_query(Some(path.into()), tx_bytes, None, false) - .await - .unwrap(); - println!("{:#?}", response); + let (data, height, prove) = (Some(tx_bytes), None, false); + let result = unwrap_client_response( + RPC.shell().dry_run_tx(&client, data, height, prove).await, + ) + .data; + println!("Dry-run result: {}", result); } /// Get account's public key stored in its storage sub-space @@ -1189,7 +1163,7 @@ pub async fn is_delegator( let client = HttpClient::new(ledger_address).unwrap(); let bonds_prefix = pos::bonds_for_source_prefix(address); let bonds = - query_storage_prefix::(client.clone(), bonds_prefix).await; + query_storage_prefix::(&client, &bonds_prefix).await; bonds.is_some() && bonds.unwrap().count() > 0 } @@ -1199,8 +1173,7 @@ pub async fn is_delegator_at( epoch: Epoch, ) -> bool { let key = pos::bonds_for_source_prefix(address); - let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + let bonds_iter = query_storage_prefix::(client, &key).await; if let Some(mut bonds) = bonds_iter { bonds.any(|(_, bond)| bond.get(epoch).is_some()) } else { @@ -1220,7 +1193,7 @@ pub async fn known_address( Address::Established(_) => { // Established account exists if it has a VP let key = storage::Key::validity_predicate(address); - query_has_storage_key(client, key).await + query_has_storage_key(&client, &key).await } Address::Implicit(_) | Address::Internal(_) => true, } @@ -1369,100 +1342,77 @@ pub async fn query_storage_value( where T: BorshDeserialize, { - let path = Path::Value(key.to_owned()); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match 
T::try_from_slice(&response.value[..]) { - Ok(value) => return Some(value), - Err(err) => eprintln!("Error decoding the value: {}", err), - }, - Code::Err(err) => { - if err == 1 { - return None; - } else { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) - } - } + // In case `T` is a unit (only thing that encodes to 0 bytes), we have to + // use `storage_has_key` instead of `storage_value`, because `storage_value` + // returns 0 bytes when the key is not found. + let maybe_unit = T::try_from_slice(&[]); + if let Ok(unit) = maybe_unit { + return if unwrap_client_response( + RPC.shell().storage_has_key(client, key).await, + ) { + Some(unit) + } else { + None + }; + } + + let response = unwrap_client_response( + RPC.shell() + .storage_value(client, None, None, false, key) + .await, + ); + if response.data.is_empty() { + return None; } - cli::safe_exit(1) + T::try_from_slice(&response.data[..]) + .map(Some) + .unwrap_or_else(|err| { + eprintln!("Error decoding the value: {}", err); + cli::safe_exit(1) + }) } /// Query a range of storage values with a matching prefix and decode them with /// [`BorshDeserialize`]. Returns an iterator of the storage keys paired with /// their associated values. pub async fn query_storage_prefix( - client: HttpClient, - key: storage::Key, + client: &HttpClient, + key: &storage::Key, ) -> Option> where T: BorshDeserialize, { - let path = Path::Prefix(key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => { - match Vec::::try_from_slice(&response.value[..]) { - Ok(values) => { - let decode = |PrefixValue { key, value }: PrefixValue| { - match T::try_from_slice(&value[..]) { - Err(_) => None, - Ok(value) => Some((key, value)), - } - }; - return Some(values.into_iter().filter_map(decode)); - } - Err(err) => eprintln!("Error decoding the values: {}", err), - } - } - Code::Err(err) => { - if err == 1 { - return None; - } else { + let values = unwrap_client_response( + RPC.shell() + .storage_prefix(client, None, None, false, key) + .await, + ); + let decode = + |PrefixValue { key, value }: PrefixValue| match T::try_from_slice( + &value[..], + ) { + Err(err) => { eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) + "Skipping a value for key {}. Error in decoding: {}", + key, err + ); + None } - } + Ok(value) => Some((key, value)), + }; + if values.data.is_empty() { + None + } else { + Some(values.data.into_iter().filter_map(decode)) } - cli::safe_exit(1) } /// Query to check if the given storage key exists. 
pub async fn query_has_storage_key( - client: HttpClient, - key: storage::Key, + client: &HttpClient, + key: &storage::Key, ) -> bool { - let path = Path::HasKey(key); - let data = vec![]; - let response = client - .abci_query(Some(path.into()), data, None, false) - .await - .unwrap(); - match response.code { - Code::Ok => match bool::try_from_slice(&response.value[..]) { - Ok(value) => return value, - Err(err) => eprintln!("Error decoding the value: {}", err), - }, - Code::Err(err) => { - eprintln!( - "Error in the query {} (error code {})", - response.info, err - ) - } - } - cli::safe_exit(1) + unwrap_client_response(RPC.shell().storage_has_key(client, key).await) } /// Represents a query for an event pertaining to the specified transaction @@ -1640,8 +1590,7 @@ pub async fn get_proposal_votes( let vote_prefix_key = gov_storage::get_proposal_vote_prefix_key(proposal_id); let vote_iter = - query_storage_prefix::(client.clone(), vote_prefix_key) - .await; + query_storage_prefix::(client, &vote_prefix_key).await; let mut yay_validators: HashMap = HashMap::new(); let mut yay_delegators: HashMap> = @@ -1746,7 +1695,7 @@ pub async fn get_proposal_offline_votes( { let key = pos::bonds_for_source_prefix(&proposal_vote.address); let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + query_storage_prefix::(client, &key).await; if let Some(bonds) = bonds_iter { for (key, epoched_bonds) in bonds { // Look-up slashes for the validator in this key and @@ -1991,8 +1940,7 @@ pub async fn get_delegators_delegation( _epoch: Epoch, ) -> Vec
{ let key = pos::bonds_for_source_prefix(address); - let bonds_iter = - query_storage_prefix::(client.clone(), key).await; + let bonds_iter = query_storage_prefix::(client, &key).await; let mut delegation_addresses: Vec
= Vec::new(); if let Some(bonds) = bonds_iter { @@ -2054,3 +2002,11 @@ fn lookup_alias(ctx: &Context, addr: &Address) -> String { None => format!("{}", addr), } } + +/// A helper to unwrap client's response. Will shut down process on error. +fn unwrap_client_response(response: Result) -> T { + response.unwrap_or_else(|err| { + eprintln!("Error in the query {}", err); + cli::safe_exit(1) + }) +} diff --git a/apps/src/lib/client/tx.rs b/apps/src/lib/client/tx.rs index ec6b0894a54..96b74306de3 100644 --- a/apps/src/lib/client/tx.rs +++ b/apps/src/lib/client/tx.rs @@ -9,17 +9,15 @@ use itertools::Either::*; use namada::ledger::governance::storage as gov_storage; use namada::ledger::pos::{BondId, Bonds, Unbonds}; use namada::proto::Tx; -use namada::types::address::{xan as m1t, Address}; +use namada::types::address::{nam, Address}; use namada::types::governance::{ OfflineProposal, OfflineVote, Proposal, ProposalVote, }; use namada::types::key::{self, *}; -use namada::types::nft::{self, Nft, NftToken}; use namada::types::storage::{Epoch, Key}; use namada::types::transaction::governance::{ InitProposalData, VoteProposalData, }; -use namada::types::transaction::nft::{CreateNft, MintNft}; use namada::types::transaction::{pos, InitAccount, InitValidator, UpdateVp}; use namada::types::{address, token}; use namada::{ledger, vm}; @@ -42,13 +40,10 @@ const TX_INIT_PROPOSAL: &str = "tx_init_proposal.wasm"; const TX_VOTE_PROPOSAL: &str = "tx_vote_proposal.wasm"; const TX_UPDATE_VP_WASM: &str = "tx_update_vp.wasm"; const TX_TRANSFER_WASM: &str = "tx_transfer.wasm"; -const TX_INIT_NFT: &str = "tx_init_nft.wasm"; -const TX_MINT_NFT: &str = "tx_mint_nft.wasm"; const VP_USER_WASM: &str = "vp_user.wasm"; const TX_BOND_WASM: &str = "tx_bond.wasm"; const TX_UNBOND_WASM: &str = "tx_unbond.wasm"; const TX_WITHDRAW_WASM: &str = "tx_withdraw.wasm"; -const VP_NFT: &str = "vp_nft.wasm"; /// Timeout for requests to the `/accepted` and `/applied` /// ABCI query endpoints. @@ -521,75 +516,6 @@ pub async fn submit_transfer(ctx: Context, args: args::TxTransfer) { process_tx(ctx, &args.tx, tx, Some(&args.source)).await; } -pub async fn submit_init_nft(ctx: Context, args: args::NftCreate) { - let file = File::open(&args.nft_data).expect("File must exist."); - let nft: Nft = serde_json::from_reader(file) - .expect("Couldn't deserialize nft data file"); - - let vp_code = match &nft.vp_path { - Some(path) => { - std::fs::read(path).expect("Expected a file at given code path") - } - None => ctx.read_wasm(VP_NFT), - }; - - let signer = Some(WalletAddress::new(nft.creator.clone().to_string())); - - let data = CreateNft { - tag: nft.tag.to_string(), - creator: nft.creator, - vp_code, - keys: nft.keys, - opt_keys: nft.opt_keys, - tokens: nft.tokens, - }; - - let data = data.try_to_vec().expect( - "Encoding transfer data to initialize a new account shouldn't fail", - ); - - let tx_code = ctx.read_wasm(TX_INIT_NFT); - - let tx = Tx::new(tx_code, Some(data)); - process_tx(ctx, &args.tx, tx, signer.as_ref()).await; -} - -pub async fn submit_mint_nft(ctx: Context, args: args::NftMint) { - let file = File::open(&args.nft_data).expect("File must exist."); - let nft_tokens: Vec = - serde_json::from_reader(file).expect("JSON was not well-formatted"); - - let nft_creator_key = nft::get_creator_key(&args.nft_address); - let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); - let nft_creator_address = - match rpc::query_storage_value::
(&client, &nft_creator_key) - .await - { - Some(addr) => addr, - None => { - eprintln!("No creator key found for {}", &args.nft_address); - safe_exit(1); - } - }; - - let signer = Some(WalletAddress::new(nft_creator_address.to_string())); - - let data = MintNft { - address: args.nft_address, - creator: nft_creator_address, - tokens: nft_tokens, - }; - - let data = data.try_to_vec().expect( - "Encoding transfer data to initialize a new account shouldn't fail", - ); - - let tx_code = ctx.read_wasm(TX_MINT_NFT); - - let tx = Tx::new(tx_code, Some(data)); - process_tx(ctx, &args.tx, tx, signer.as_ref()).await; -} - pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { let file = File::open(&args.proposal_data).expect("File must exist."); let proposal: Proposal = @@ -695,7 +621,7 @@ pub async fn submit_init_proposal(mut ctx: Context, args: args::InitProposal) { safe_exit(1) }; - let balance = rpc::get_token_balance(&client, &m1t(), &proposal.author) + let balance = rpc::get_token_balance(&client, &nam(), &proposal.author) .await .unwrap_or_default(); if balance @@ -967,7 +893,7 @@ pub async fn submit_bond(ctx: Context, args: args::Bond) { // Check bond's source (source for delegation or validator for self-bonds) // balance let bond_source = source.as_ref().unwrap_or(&validator); - let balance_key = token::balance_key(&address::xan(), bond_source); + let balance_key = token::balance_key(&address::nam(), bond_source); let client = HttpClient::new(args.tx.ledger_address.clone()).unwrap(); match rpc::query_storage_value::(&client, &balance_key).await { diff --git a/apps/src/lib/node/ledger/mod.rs b/apps/src/lib/node/ledger/mod.rs index 0a1695de332..d4d6d4d2905 100644 --- a/apps/src/lib/node/ledger/mod.rs +++ b/apps/src/lib/node/ledger/mod.rs @@ -2,8 +2,6 @@ mod abortable; mod broadcaster; mod ethereum_node; pub mod events; -pub mod protocol; -pub mod rpc; mod shell; mod shims; pub mod storage; diff --git a/apps/src/lib/node/ledger/rpc.rs b/apps/src/lib/node/ledger/rpc.rs deleted file mode 100644 index 4cbedca8bc2..00000000000 --- a/apps/src/lib/node/ledger/rpc.rs +++ /dev/null @@ -1,131 +0,0 @@ -//! RPC endpoint is used for ledger state queries - -use std::fmt::Display; -use std::str::FromStr; - -use namada::types::address::Address; -use namada::types::hash::{self, Hash}; -use namada::types::storage; -use thiserror::Error; - -use crate::facade::tendermint::abci::Path as AbciPath; - -/// RPC query path. -#[derive(Debug, Clone)] -pub enum Path { - /// Dry run a transaction. - DryRunTx, - /// Epoch of the last committed block. - Epoch, - /// Read a storage value with exact storage key. - Value(storage::Key), - /// Read a range of storage values with a matching key prefix. - Prefix(storage::Key), - /// Check if the given storage key exists. - HasKey(storage::Key), - /// Check if a transaction was accepted. - Accepted { tx_hash: Hash }, - /// Check if a transaction was applied. - Applied { tx_hash: Hash }, -} - -#[derive(Debug, Clone)] -pub struct BalanceQuery { - #[allow(dead_code)] - owner: Option
, - #[allow(dead_code)] - token: Option
, -} - -const DRY_RUN_TX_PATH: &str = "dry_run_tx"; -const EPOCH_PATH: &str = "epoch"; -const VALUE_PREFIX: &str = "value"; -const PREFIX_PREFIX: &str = "prefix"; -const HAS_KEY_PREFIX: &str = "has_key"; -const ACCEPTED_PREFIX: &str = "accepted"; -const APPLIED_PREFIX: &str = "applied"; - -impl Display for Path { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Path::DryRunTx => write!(f, "{}", DRY_RUN_TX_PATH), - Path::Epoch => write!(f, "{}", EPOCH_PATH), - Path::Value(storage_key) => { - write!(f, "{}/{}", VALUE_PREFIX, storage_key) - } - Path::Prefix(storage_key) => { - write!(f, "{}/{}", PREFIX_PREFIX, storage_key) - } - Path::HasKey(storage_key) => { - write!(f, "{}/{}", HAS_KEY_PREFIX, storage_key) - } - Path::Accepted { tx_hash } => { - write!(f, "{ACCEPTED_PREFIX}/{tx_hash}") - } - Path::Applied { tx_hash } => { - write!(f, "{APPLIED_PREFIX}/{tx_hash}") - } - } - } -} - -impl FromStr for Path { - type Err = PathParseError; - - fn from_str(s: &str) -> Result { - match s { - DRY_RUN_TX_PATH => Ok(Self::DryRunTx), - EPOCH_PATH => Ok(Self::Epoch), - _ => match s.split_once('/') { - Some((VALUE_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Value(key)) - } - Some((PREFIX_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::Prefix(key)) - } - Some((HAS_KEY_PREFIX, storage_key)) => { - let key = storage::Key::parse(storage_key) - .map_err(PathParseError::InvalidStorageKey)?; - Ok(Self::HasKey(key)) - } - Some((ACCEPTED_PREFIX, tx_hash)) => { - let tx_hash = tx_hash - .try_into() - .map_err(PathParseError::InvalidTxHash)?; - Ok(Self::Accepted { tx_hash }) - } - Some((APPLIED_PREFIX, tx_hash)) => { - let tx_hash = tx_hash - .try_into() - .map_err(PathParseError::InvalidTxHash)?; - Ok(Self::Applied { tx_hash }) - } - _ => Err(PathParseError::InvalidPath(s.to_string())), - }, - } - } -} - -impl From for AbciPath { - fn from(path: Path) -> Self { - let path = path.to_string(); - // TODO: update in tendermint-rs to allow to construct this from owned - // string. It's what `from_str` does anyway - AbciPath::from_str(&path).unwrap() - } -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum PathParseError { - #[error("Unrecognized query path: {0}")] - InvalidPath(String), - #[error("Invalid storage key: {0}")] - InvalidStorageKey(storage::Error), - #[error("Invalid transaction hash: {0}")] - InvalidTxHash(hash::Error), -} diff --git a/apps/src/lib/node/ledger/shell/finalize_block.rs b/apps/src/lib/node/ledger/shell/finalize_block.rs index c5119f5cf1c..a02c9b75590 100644 --- a/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -1,5 +1,6 @@ //! Implementation of the `FinalizeBlock` ABCI++ method for the Shell +use namada::ledger::protocol; use namada::types::storage::{BlockHash, Header}; use namada::types::transaction::protocol::ProtocolTxType; @@ -340,7 +341,7 @@ where /// are covered by the e2e tests. 
#[cfg(test)] mod test_finalize_block { - use namada::types::address::xan; + use namada::types::address::nam; use namada::types::ethereum_events::EthAddress; use namada::types::storage::Epoch; use namada::types::transaction::{EncryptionKey, Fee}; @@ -372,7 +373,7 @@ mod test_finalize_block { let wrapper = WrapperTx::new( Fee { amount: i.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -443,7 +444,7 @@ mod test_finalize_block { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -495,7 +496,7 @@ mod test_finalize_block { let wrapper = WrapperTx { fee: Fee { amount: 0.into(), - token: xan(), + token: nam(), }, pk: keypair.ref_to(), epoch: Epoch(0), @@ -561,7 +562,7 @@ mod test_finalize_block { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -592,7 +593,7 @@ mod test_finalize_block { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), diff --git a/apps/src/lib/node/ledger/shell/governance.rs b/apps/src/lib/node/ledger/shell/governance.rs index f5e9505909e..6aadccf3713 100644 --- a/apps/src/lib/node/ledger/shell/governance.rs +++ b/apps/src/lib/node/ledger/shell/governance.rs @@ -3,10 +3,12 @@ use namada::ledger::governance::utils::{ compute_tally, get_proposal_votes, ProposalEvent, }; use namada::ledger::governance::vp::ADDRESS as gov_address; +use namada::ledger::protocol; use namada::ledger::slash_fund::ADDRESS as slash_fund_address; +use namada::ledger::storage::traits::StorageHasher; use namada::ledger::storage::types::encode; use namada::ledger::storage::{DBIter, DB}; -use namada::types::address::{xan as m1t, Address}; +use namada::types::address::{nam, Address}; use namada::types::governance::TallyResult; use namada::types::storage::Epoch; use namada::types::token; @@ -179,7 +181,7 @@ where // transfer proposal locked funds shell .storage - .transfer(&m1t(), funds, &gov_address, &transfer_address); + .transfer(&nam(), funds, &gov_address, &transfer_address); } Ok(proposals_result) diff --git a/apps/src/lib/node/ledger/shell/init_chain.rs b/apps/src/lib/node/ledger/shell/init_chain.rs index 890e8a45133..d8d4ce738cc 100644 --- a/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/apps/src/lib/node/ledger/shell/init_chain.rs @@ -228,7 +228,7 @@ where // Account balance (tokens no staked in PoS) self.storage .write( - &token::balance_key(&address::xan(), addr), + &token::balance_key(&address::nam(), addr), validator .non_staked_balance .try_to_vec() diff --git a/apps/src/lib/node/ledger/shell/mod.rs b/apps/src/lib/node/ledger/shell/mod.rs index ae8aade9b44..aa12b80f1b9 100644 --- a/apps/src/lib/node/ledger/shell/mod.rs +++ b/apps/src/lib/node/ledger/shell/mod.rs @@ -19,18 +19,18 @@ use std::mem; use std::path::{Path, PathBuf}; #[allow(unused_imports)] use std::rc::Rc; -use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use namada::ledger::gas::BlockGasMeter; -use namada::ledger::pos; use namada::ledger::pos::namada_proof_of_stake::types::{ ActiveValidator, ValidatorSetUpdate, }; use namada::ledger::pos::namada_proof_of_stake::PosBase; +use namada::ledger::protocol::ShellParams; use namada::ledger::storage::traits::{Sha256Hasher, StorageHasher}; use namada::ledger::storage::write_log::WriteLog; use namada::ledger::storage::{DBIter, Storage, DB}; +use namada::ledger::{pos, protocol}; use namada::proto::{self, Tx}; use namada::types::address; use namada::types::chain::ChainId; 
@@ -48,8 +48,6 @@ use num_traits::{FromPrimitive, ToPrimitive}; use thiserror::Error; use tokio::sync::mpsc::{Receiver, UnboundedSender}; -use super::protocol::ShellParams; -use super::rpc; use crate::config::{genesis, TendermintMode}; use crate::facade::tendermint_proto::abci::{ Misbehavior as Evidence, MisbehaviorType as EvidenceType, ValidatorUpdate, @@ -60,7 +58,7 @@ use crate::node::ledger::events::log::EventLog; use crate::node::ledger::events::Event; use crate::node::ledger::shims::abcipp_shim_types::shim; use crate::node::ledger::shims::abcipp_shim_types::shim::response::TxResult; -use crate::node::ledger::{protocol, storage, tendermint_node}; +use crate::node::ledger::{storage, tendermint_node}; #[allow(unused_imports)] use crate::wallet::{ValidatorData, ValidatorKeys}; use crate::{config, wallet}; @@ -705,44 +703,6 @@ where response } - /// Simulate validation and application of a transaction. - fn dry_run_tx(&self, tx_bytes: &[u8]) -> response::Query { - let mut response = response::Query::default(); - let mut gas_meter = BlockGasMeter::default(); - let mut write_log = WriteLog::default(); - let mut vp_wasm_cache = self.vp_wasm_cache.read_only(); - let mut tx_wasm_cache = self.tx_wasm_cache.read_only(); - match Tx::try_from(tx_bytes) { - Ok(tx) => { - match protocol::apply_wasm_tx( - tx, - tx_bytes.len(), - ShellParams { - block_gas_meter: &mut gas_meter, - write_log: &mut write_log, - storage: &self.storage, - vp_wasm_cache: &mut vp_wasm_cache, - tx_wasm_cache: &mut tx_wasm_cache, - }, - ) - .map_err(Error::TxApply) - { - Ok(result) => response.info = result.to_string(), - Err(error) => { - response.code = 1; - response.log = format!("{}", error); - } - } - response - } - Err(err) => { - response.code = 1; - response.log = format!("{}", Error::TxDecoding(err)); - response - } - } - } - /// Lookup a validator's keypair for their established account from their /// wallet. 
If the node is not validator, this function returns None #[allow(dead_code)] @@ -779,6 +739,23 @@ where } } +impl<'a, D, H> From<&'a mut Shell> + for ShellParams<'a, D, H, namada::vm::WasmCacheRwAccess> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + fn from(shell: &'a mut Shell) -> Self { + Self { + block_gas_meter: &mut shell.gas_meter, + write_log: &mut shell.write_log, + storage: &shell.storage, + vp_wasm_cache: &mut shell.vp_wasm_cache, + tx_wasm_cache: &mut shell.tx_wasm_cache, + } + } +} + /// Helper functions and types for writing unit tests /// for the shell #[cfg(test)] @@ -791,7 +768,7 @@ mod test_utils { use namada::ledger::storage::mockdb::MockDB; use namada::ledger::storage::traits::Sha256Hasher; use namada::ledger::storage::{BlockStateWrite, MerkleTree}; - use namada::types::address::{xan, EstablishedAddressGen}; + use namada::types::address::{nam, EstablishedAddressGen}; use namada::types::chain::ChainId; use namada::types::hash::Hash; use namada::types::key::*; @@ -1091,7 +1068,7 @@ mod test_utils { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), diff --git a/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 6ad11f2e3c2..a99d3df269f 100644 --- a/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -285,7 +285,7 @@ mod test_prepare_proposal { }; use namada::ledger::pos::namada_proof_of_stake::PosBase; use namada::proto::{Signed, SignedTxData}; - use namada::types::address::xan; + use namada::types::address::nam; use namada::types::ethereum_events::EthereumEvent; use namada::types::key::{common, RefTo}; use namada::types::storage::{BlockHeight, Epoch}; @@ -910,7 +910,7 @@ mod test_prepare_proposal { WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -981,7 +981,7 @@ mod test_prepare_proposal { let wrapper_tx = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), diff --git a/apps/src/lib/node/ledger/shell/process_proposal.rs b/apps/src/lib/node/ledger/shell/process_proposal.rs index bf341a51a08..dc4de4fb9b6 100644 --- a/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -429,7 +429,7 @@ mod test_process_proposal { use assert_matches::assert_matches; use borsh::BorshDeserialize; use namada::proto::SignedTxData; - use namada::types::address::xan; + use namada::types::address::nam; use namada::types::ethereum_events::EthereumEvent; use namada::types::hash::Hash; use namada::types::key::*; @@ -748,7 +748,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -795,7 +795,7 @@ mod test_process_proposal { let mut wrapper = WrapperTx::new( Fee { amount: 100.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -877,7 +877,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 1.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -922,7 +922,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: Amount::whole(1_000_100), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -970,7 +970,7 @@ mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: i.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -1034,7 +1034,7 @@ 
mod test_process_proposal { let wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -1085,7 +1085,7 @@ mod test_process_proposal { let mut wrapper = WrapperTx::new( Fee { amount: 0.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -1130,7 +1130,7 @@ mod test_process_proposal { let wrapper = WrapperTx { fee: Fee { amount: 0.into(), - token: xan(), + token: nam(), }, pk: keypair.ref_to(), epoch: Epoch(0), diff --git a/apps/src/lib/node/ledger/shell/queries.rs b/apps/src/lib/node/ledger/shell/queries.rs index 1fad92eb9da..3f854b2a3d7 100644 --- a/apps/src/lib/node/ledger/shell/queries.rs +++ b/apps/src/lib/node/ledger/shell/queries.rs @@ -8,19 +8,19 @@ use namada::ledger::parameters::EpochDuration; use namada::ledger::pos::namada_proof_of_stake::types::VotingPower; use namada::ledger::pos::types::WeightedValidator; use namada::ledger::pos::PosParams; +use namada::ledger::queries::{RequestCtx, ResponseQuery}; +use namada::ledger::storage_api; use namada::types::address::Address; use namada::types::ethereum_events::EthAddress; use namada::types::key; use namada::types::key::dkg_session_keys::DkgPublicKey; -use namada::types::storage::{Epoch, Key, PrefixValue}; +use namada::types::storage::Epoch; use namada::types::token::{self, Amount}; use namada::types::vote_extensions::validator_set_update::EthAddrBook; use super::*; -use crate::facade::tendermint_proto::crypto::{ProofOp, ProofOps}; use crate::facade::tendermint_proto::google::protobuf; use crate::facade::tendermint_proto::types::EvidenceParams; -use crate::node::ledger::events::log::dumb_queries; use crate::node::ledger::response; #[derive(Error, Debug)] @@ -55,47 +55,39 @@ where /// the default if `path` is not a supported string. /// INVARIANT: This method must be stateless. pub fn query(&self, query: request::Query) -> response::Query { - use rpc::Path; - let height = match query.height { - 0 => self.storage.get_block_height().0, - 1.. 
=> BlockHeight(query.height as u64), - _ => { + let ctx = RequestCtx { + storage: &self.storage, + vp_wasm_cache: self.vp_wasm_cache.read_only(), + tx_wasm_cache: self.tx_wasm_cache.read_only(), + }; + + // Convert request to domain-type + let request = match namada::ledger::queries::RequestQuery::try_from_tm( + &self.storage, + query, + ) { + Ok(request) => request, + Err(err) => { return response::Query { code: 1, - info: format!( - "The query height is invalid: {}", - query.height - ), + info: format!("Unexpected query: {}", err), ..Default::default() }; } }; - match Path::from_str(&query.path) { - Ok(path) => match path { - Path::DryRunTx => self.dry_run_tx(&query.data), - Path::Epoch => { - let (epoch, _gas) = self.storage.get_last_epoch(); - let value = namada::ledger::storage::types::encode(&epoch); - response::Query { - value, - ..Default::default() - } - } - Path::Value(storage_key) => { - self.read_storage_value(&storage_key, height, query.prove) - } - Path::Prefix(storage_key) => { - self.read_storage_prefix(&storage_key, height, query.prove) - } - Path::HasKey(storage_key) => self.has_storage_key(&storage_key), - Path::Accepted { tx_hash } => { - let matcher = dumb_queries::QueryMatcher::accepted(tx_hash); - self.query_event_log(matcher) - } - Path::Applied { tx_hash } => { - let matcher = dumb_queries::QueryMatcher::applied(tx_hash); - self.query_event_log(matcher) - } + + // Invoke the root RPC handler - returns borsh-encoded data on success + let result = namada::ledger::queries::handle_path(ctx, &request); + match result { + Ok(ResponseQuery { + data, + info, + proof_ops, + }) => response::Query { + value: data, + info, + proof_ops, + ..Default::default() }, Err(err) => response::Query { code: 1, @@ -106,237 +98,20 @@ where } /// Query events in the event log matching the given query. - fn query_event_log( - &self, - matcher: dumb_queries::QueryMatcher, - ) -> response::Query { - let value = self - .event_log() - .iter_with_matcher(matcher) - .cloned() - .collect::<Vec<_>>() - .try_to_vec() - .unwrap(); - - response::Query { - value, - ..Default::default() - } - } - - /// Query to check if a storage key exists. - fn has_storage_key(&self, key: &Key) -> response::Query { - match self.storage.has_key(key) { - Ok((has_key, _gas)) => response::Query { - value: has_key.try_to_vec().unwrap(), - ..Default::default() - }, - Err(err) => response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }, - } - } - - /// Query to read a range of values from storage with a matching prefix. The - /// value in successful response is a [`Vec<PrefixValue>`] encoded with - /// [`BorshSerialize`]. - fn read_storage_prefix( + pub fn query_event_log( &self, - key: &Key, - height: BlockHeight, - is_proven: bool, - ) -> response::Query { - if height != self.storage.get_block_height().0 { - return response::Query { - code: 2, - info: format!( - "Prefix read works with only the latest height: height {}", - height - ), - ..Default::default() - }; - } - let (iter, _gas) = self.storage.iter_prefix(key); - let mut iter = iter.peekable(); - if iter.peek().is_none() { - response::Query { - code: 1, - info: format!("No value found for key: {}", key), - ..Default::default() - } - } else { - let values: std::result::Result< - Vec<PrefixValue>, - namada::types::storage::Error, - > = iter - .map(|(key, value, _gas)| { - let key = Key::parse(key)?; - Ok(PrefixValue { key, value }) - }) - .collect(); - match values { - Ok(values) => { - let proof_ops = if is_proven { - let mut ops = vec![]; - for PrefixValue { key, value } in &values { - match self.storage.get_existence_proof( - key, - value.clone().into(), - height, - ) { - Ok(p) => { - let mut cur_ops: Vec<ProofOp> = p - .ops - .into_iter() - .map(|op| { - #[cfg(feature = "abcipp")] - { - ProofOp { - r#type: op.field_type, - key: op.key, - data: op.data, - } - } - #[cfg(not(feature = "abcipp"))] - { - op.into() - } - }) - .collect(); - ops.append(&mut cur_ops); - } - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } - // ops is not empty in this case - Some(ProofOps { ops }) - } else { - None - }; - let value = values.try_to_vec().unwrap(); - response::Query { - value, - proof_ops, - ..Default::default() - } - } - Err(err) => response::Query { - code: 1, - info: format!( - "Error parsing a storage key {}: {}", - key, err - ), - ..Default::default() - }, - } - } - } - - /// Query to read a value from storage - fn read_storage_value( - &self, - key: &Key, - height: BlockHeight, - is_proven: bool, - ) -> response::Query { - match self.storage.read_with_height(key, height) { - Ok((Some(value), _gas)) => { - let proof_ops = if is_proven { - match self.storage.get_existence_proof( - key, - value.clone().into(), - height, - ) { - Ok(proof) => Some({ - #[cfg(feature = "abcipp")] - { - let ops = proof - .ops - .into_iter() - .map(|op| ProofOp { - r#type: op.field_type, - key: op.key, - data: op.data, - }) - .collect(); - ProofOps { ops } - } - #[cfg(not(feature = "abcipp"))] - { - proof.into() - } - }), - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } else { - None - }; - response::Query { - value, - proof_ops, - ..Default::default() - } - } - Ok((None, _gas)) => { - let proof_ops = if is_proven { - match self.storage.get_non_existence_proof(key, height) { - Ok(proof) => Some({ - #[cfg(feature = "abcipp")] - { - let ops = proof - .ops - .into_iter() - .map(|op| ProofOp { - r#type: op.field_type, - key: op.key, - data: op.data, - }) - .collect(); - ProofOps { ops } - } - #[cfg(not(feature = "abcipp"))] - { - proof.into() - } - }), - Err(err) => { - return response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }; - } - } - } else { - None - }; - response::Query { - code: 1, - info: format!("No value found for key: {}", key), - proof_ops, - ..Default::default() - } - } - Err(err) => response::Query { - code: 2, - info: format!("Storage error: {}", err), - ..Default::default() - }, - } + token: &Address, + owner: &Address, + ) -> token::Amount { + let balance =
storage_api::StorageRead::read( + &self.storage, + &token::balance_key(token, owner), + ); + // Storage read must not fail, but there might be no value, in which + // case default (0) is returned + balance + .expect("Storage read in the protocol must not fail") + .unwrap_or_default() + } } diff --git a/apps/src/lib/node/ledger/tendermint_node.rs b/apps/src/lib/node/ledger/tendermint_node.rs index 0b4e6b2e9ec..dc932f0ada4 100644 --- a/apps/src/lib/node/ledger/tendermint_node.rs +++ b/apps/src/lib/node/ledger/tendermint_node.rs @@ -409,6 +409,12 @@ async fn update_tendermint_config( config.instrumentation.namespace = tendermint_config.instrumentation_namespace; + #[cfg(feature = "abciplus")] + { + config.consensus.timeout_commit = + tendermint_config.consensus_timeout_commit; + } + let mut file = OpenOptions::new() .write(true) .truncate(true) diff --git a/documentation/dev/src/explore/design/actors.md b/documentation/dev/src/explore/design/actors.md index 88e26f3d640..6172505adb8 100644 --- a/documentation/dev/src/explore/design/actors.md +++ b/documentation/dev/src/explore/design/actors.md @@ -1,6 +1,6 @@ # Actors and Incentives -Namada consists of various actors fulfilling various roles in the network. They are all incentivized to act for the good of the network. The native Namada token `XAN` is used to settle transaction fees and pay for the incentives in Namada. +Namada consists of various actors fulfilling various roles in the network. They are all incentivized to act for the good of the network. The native Namada token `NAM` is used to settle transaction fees and pay for the incentives in Namada. ## Fees associated with a transaction @@ -9,7 +9,7 @@ Users of Namada can - transfer private assets they hold to other users and - barter assets with other users. -Each transaction may be associated with the following fees, paid in `XAN`: +Each transaction may be associated with the following fees, paid in `NAM`: - **Execution fees** to compensate for computing, storage and memory costs, charges at 2 stages: - **initial fee (init_f)**: charged before the transaction is settled diff --git a/documentation/dev/src/explore/design/ledger/governance.md b/documentation/dev/src/explore/design/ledger/governance.md index da26c8e989b..d9e578eed5f 100644 --- a/documentation/dev/src/explore/design/ledger/governance.md +++ b/documentation/dev/src/explore/design/ledger/governance.md @@ -1,10 +1,10 @@ # Governance -Namada introduce a governance mechanism to propose and apply protocol changes with and without the need for an hard fork. Anyone holding some M1T will be able to prosose some changes to which delegators and validator will cast their yay or nay votes. Governance on Namada supports both signaling and voting mechanism. The difference between the the two, is that the former is needed when the changes require an hard fork. In cases where the chain is not able to produce blocks anymore, Namada relies an off chain signaling mechanism to agree on a common strategy. +Namada introduces a governance mechanism to propose and apply protocol changes with and without the need for a hard fork. Anyone holding some NAM will be able to propose some changes to which delegators and validators will cast their yay or nay votes. Governance on Namada supports both signaling and voting mechanisms. The difference between the two is that the former is needed when the changes require a hard fork. In cases where the chain is not able to produce blocks anymore, Namada relies on an off-chain signaling mechanism to agree on a common strategy. ## Governance & SlashFund addresses -Governance introduce two internal address with their corresponding native vps: +Governance introduces two internal addresses with their corresponding native VPs: - Governance address, which is in charge of validating on-chain proposals and votes - SlashFund address, which is in charge of holding slashed funds @@ -20,7 +20,7 @@ Also, it introduces some protocol parameters: ## On-chain proposals -On-chain proposals are created under the `governance_address` storage space and, by default, this storage space is initialized with following storage keys: +On-chain proposals are created under the `governance_address` storage space and, by default, this storage space is initialized with the following storage keys: ``` /$GovernanceAddress/counter: u64 @@ -32,11 +32,12 @@ On-chain proposals are created under the `governance_address` storage space and, /$GovernanceAddress/min_proposal_grace_epochs: u64 ``` -In order to create a valid proposal, a transaction need to modify these storage keys: +In order to create a valid proposal, a transaction needs to modify these storage keys: ``` -/$GovernanceAddress/proposal/$id/content : Vec<u8> -/$GovernanceAddress/proposal/$id/author : Address +/$GovernanceAddress/proposal/$id/content: Vec<u8> +/$GovernanceAddress/proposal/$id/author: Address +/$GovernanceAddress/proposal/$id/type: ProposalType /$GovernanceAddress/proposal/$id/startEpoch: Epoch /$GovernanceAddress/proposal/$id/endEpoch: Epoch /$GovernanceAddress/proposal/$id/graceEpoch: Epoch @@ -48,7 +49,7 @@ and follow these rules: - `$id` must be equal to `counter + 1`. - `startEpoch` must: - - be grater than `currentEpoch`, where current epoch is the epoch in which the transaction is executed and included in a block + - be greater than `currentEpoch`, where current epoch is the epoch in which the transaction is executed and included in a block - be a multiple of `min_proposal_period`. - `endEpoch` must: - be at least `min_proposal_period` epochs greater than `startEpoch` @@ -60,13 +61,18 @@ and follow these rules: - `funds` must be equal to `min_proposal_fund` and should be moved to the `governance_address`. - `content` should follow the `Namada Improvement Proposal schema` and must be less than `max_proposal_content_size` kibibytes. - `author` must be a valid address on-chain +- `type` defines: + - the optional payload (memo) attached to the vote + - which actors should be allowed to vote (delegators and validators or validators only) + - the threshold to be used in the tally process + - the optional wasm code attached to the proposal -A proposal gets accepted if, at least 2/3 of the total voting power (computed at the epoch definied in the `startEpoch` field) vote `yay`. If the proposal is accepted, the locked funds are returned to the address definied in the `proposal_author` field, otherwise are moved to the slash fund address. +A proposal gets accepted if the `yay` votes, counted in voting power, reach the threshold specified by `ProposalType` (computed at the epoch defined in the `endEpoch` field). If the proposal is accepted, the locked funds are returned to the address defined in the `proposal_author` field; otherwise they are moved to the slash fund address. The `proposal_code` field can execute arbitrary code in the form of a wasm transaction. If the proposal gets accepted, the code is executed in the first block of the epoch following the `graceEpoch`. -Proposal can be submitted by any address as long as the above rules are respected. Votes can be casted only by active validators and delegator (at epoch `startEpoch` or less). -Moreover, validator can vote only during the first 2/3 of the voting period (from `startEpoch` and 2/3 of `endEpoch` - `startEpoch`). +Proposals can be submitted by any address as long as the above rules are respected. Votes can be cast only by active validators and delegators (at epoch `endEpoch` or less): the proposal type may impose further constraints on this. +Moreover, if delegators are allowed to vote, validators can vote only during the first 2/3 of the voting period (from `startEpoch` until `startEpoch` + 2/3 of (`endEpoch` - `startEpoch`)). The preferred content template (`Namada Improvement Proposal schema`) is the following: @@ -84,24 +90,24 @@ The preferred content template (`Namada Improvement Proposal schema`) is the fol } ``` -In order to vote a proposal, a transaction should modify the following storage key: +In order to vote on a proposal, a transaction should modify the following storage key: ``` /$GovernanceAddress/proposal/$id/vote/$validator_address/$voter_address: ProposalVote ``` -where ProposalVote is a borsh encoded string containing either `yay` or `nay`, `$validator_address` is the delegation validator address and the `$voter_address` is the address of who is voting. A voter can be cast for each delegation. +where `ProposalVote` is an enum representing a `Yay` or `Nay` vote: the yay variant also contains the specific memo (if any) required for that proposal. `$validator_address` is the delegation validator address and the `$voter_address` is the address of who is voting. A vote can be cast for each delegation. -Vote is valid if it follow this rules: +A vote is valid if it follows these rules: -- vote can be sent only by validator or delegators -- validator can vote only during the first 2/3 of the total voting period, delegator can vote for the whole voting period +- votes can be sent only by validators or delegators (also depending on the proposal type) +- if delegators can vote, validators can vote only during the first 2/3 of the total voting period, while delegators can vote for the whole voting period -The outcome of a proposal is compute at the epoch specific in the `endEpoch` field and executed at `graceEpoch` field (if it contains a non-empty `proposalCode` field). -A proposal is accepted only if more than 2/3 of the voting power vote `yay`. +The outcome of a proposal is computed at the epoch specified in the `endEpoch` field and executed at the epoch specified in the `graceEpoch` field (if it contains a non-empty `proposalCode` field). +A proposal is accepted only if the `yay` votes, counted in voting power, reach the threshold set in `ProposalType`. If a proposal gets accepted, the locked funds will be reimbursed to the author. In case it gets rejected, the locked funds will be moved to slash fund. ## Off-chain proposal -In case where its not possibile to run a proposal online (for example, when the chain is halted), an offline mechanism can be used. -The ledger offers the possibility to create and sign proposal which are verified against a specific chain epoch. +In cases where it's not possible to run a proposal online (for example, when the chain is halted), an offline mechanism can be used. +The ledger offers the possibility to create and sign proposals that are verified against a specific chain epoch.
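For concreteness, a vote value matching the description above might be modeled as the following enum — a minimal sketch assuming the `borsh` crate for serialization; the name and the `Vec<u8>` memo representation are illustrative, not the concrete Namada type:

```rust
use borsh::{BorshDeserialize, BorshSerialize};

/// Illustrative shape of the value written under
/// `/$GovernanceAddress/proposal/$id/vote/$validator_address/$voter_address`.
#[derive(BorshSerialize, BorshDeserialize, Debug)]
pub enum ProposalVote {
    /// A `yay` vote, optionally carrying the memo required by the proposal's type.
    Yay(Option<Vec<u8>>),
    /// A `nay` vote carries no memo.
    Nay,
}
```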
diff --git a/documentation/dev/src/explore/design/ledger/pos-integration.md b/documentation/dev/src/explore/design/ledger/pos-integration.md index d441e5ddcac..dda5abc7489 100644 --- a/documentation/dev/src/explore/design/ledger/pos-integration.md +++ b/documentation/dev/src/explore/design/ledger/pos-integration.md @@ -27,7 +27,7 @@ All [the data relevant to the PoS system](https://specs.namada.net/economics/pro - `validator/{validator_address}/address_raw_hash` (required): raw hash of validator's address associated with the address is used for look-up of validator address from a raw hash - TBA (e.g. alias, website, description, delegation commission rate, etc.) -Only XAN tokens can be staked in bonds. The tokens being staked (bonds and unbonds amounts) are kept in the PoS account under `{xan_address}/balance/{pos_address}` until they are withdrawn. +Only NAM tokens can be staked in bonds. The tokens being staked (bonds and unbonds amounts) are kept in the PoS account under `{nam_address}/balance/{pos_address}` until they are withdrawn. ## Initialization @@ -39,7 +39,7 @@ Staking rewards for validators are rewarded in Tendermint's method `BeginBlock` To a validator who proposed a block (`block.header.proposer_address`), the system rewards tokens based on the `block_proposer_reward` PoS parameter and each validator that voted on a block (`block.last_commit_info.validator` who `signed_last_block`) receives `block_vote_reward`. -All the fees that are charged in a transaction execution (DKG transaction wrapper fee and transactions applied in a block) are transferred into a fee pool, which is another special account controlled by the PoS module. Note that the fee pool account may contain tokens other than the staking token XAN. +All the fees that are charged in a transaction execution (DKG transaction wrapper fee and transactions applied in a block) are transferred into a fee pool, which is another special account controlled by the PoS module. Note that the fee pool account may contain tokens other than the staking token NAM. - TODO describe the fee pool, related to , and diff --git a/documentation/dev/src/explore/design/ledger/vp.md b/documentation/dev/src/explore/design/ledger/vp.md index 7cf939f331c..46eb28e5713 100644 --- a/documentation/dev/src/explore/design/ledger/vp.md +++ b/documentation/dev/src/explore/design/ledger/vp.md @@ -39,9 +39,9 @@ The Proof-of-Stake slash pool is a simple account with a native VP which can rec The [fungible token VP](https://github.com/anoma/anoma/tree/master/wasm/wasm_source) allows to associate accounts balances of a specific token under its account. -For illustration, users `Albert` and `Bertha` might hold some amount of token with the address `XAN`. Their balances would be stored in the `XAN`'s storage sub-space under the storage keys `@XAN/balance/@Albert` and `@XAN/balance/@Bertha`, respectively. When `Albert` or `Bertha` attempt to transact with their `XAN` tokens, its validity predicate would be triggered to check: +For illustration, users `Albert` and `Bertha` might hold some amount of token with the address `NAM`. Their balances would be stored in the `NAM`'s storage sub-space under the storage keys `@NAM/balance/@Albert` and `@NAM/balance/@Bertha`, respectively. When `Albert` or `Bertha` attempt to transact with their `NAM` tokens, its validity predicate would be triggered to check: -- the total supply of `XAN` token is preserved (i.e. inputs = outputs) +- the total supply of `NAM` token is preserved (i.e. 
inputs = outputs) - the senders (users whose balance has been deducted) are checked that their validity predicate has also been triggered Note that the fungible token VP doesn't need to know whether any of involved users accepted or rejected the transaction, because if any of the involved users rejects it, the whole transaction will be rejected. diff --git a/documentation/dev/src/specs/ledger/default-transactions.md b/documentation/dev/src/specs/ledger/default-transactions.md index 78ed5b2098e..fb254f0cd3b 100644 --- a/documentation/dev/src/specs/ledger/default-transactions.md +++ b/documentation/dev/src/specs/ledger/default-transactions.md @@ -36,13 +36,13 @@ Attach [UpdateVp](../encoding.md#updatevp) to the `data`. ### tx_bond -Self-bond `amount` of XAN token from `validator` (without `source`) or delegate to `validator` from `source`. +Self-bond `amount` of NAM token from `validator` (without `source`) or delegate to `validator` from `source`. Attach [Bond](../encoding.md#bond) to the `data`. ### tx_unbond -Unbond self-bonded `amount` of XAN token from the `validator` (without `source`) or unbond delegation from the `source` to the `validator`. +Unbond self-bonded `amount` of NAM token from the `validator` (without `source`) or unbond delegation from the `source` to the `validator`. Attach [Bond](../encoding.md#bond) to the `data`. diff --git a/documentation/specs/src/SUMMARY.md b/documentation/specs/src/SUMMARY.md index ff91734a45b..9bf87ed0657 100644 --- a/documentation/specs/src/SUMMARY.md +++ b/documentation/specs/src/SUMMARY.md @@ -2,7 +2,7 @@ - [Introduction](./introduction.md) - [Base ledger](./base-ledger.md) - - [Consensus](./base-ledger/consensus.md) + - [Core Concepts](./base-ledger/core-concepts.md) - [Execution](./base-ledger/execution.md) - [Governance](./base-ledger/governance.md) - [Default account](./base-ledger/default-account.md) diff --git a/documentation/specs/src/base-ledger.md b/documentation/specs/src/base-ledger.md index b345208e589..a848fa4eac5 100644 --- a/documentation/specs/src/base-ledger.md +++ b/documentation/specs/src/base-ledger.md @@ -1,3 +1,3 @@ ## Base ledger -The base ledger of Namada includes a [consensus system](./base-ledger/consensus.md), validity predicate-based [execution system](./base-ledger/execution.md), and signalling-based [governance mechanism](./base-ledger/governance.md). Namada's ledger also includes proof-of-stake, slashing, fees, and inflation funding for staking rewards, shielded pool incentives, and public goods -- these are specified in the [economics section](./economics.md). \ No newline at end of file +The base ledger of Namada includes a [consensus system](./base-ledger/core-concepts.md), validity predicate-based [execution system](./base-ledger/execution.md), and signalling-based [governance mechanism](./base-ledger/governance.md). 
Namada's ledger also includes proof-of-stake, slashing, fees, and inflation funding for staking rewards, shielded pool incentives, and public goods -- these are specified in the [economics section](./economics.md) \ No newline at end of file diff --git a/documentation/specs/src/base-ledger/core-concepts.md b/documentation/specs/src/base-ledger/core-concepts.md new file mode 100644 index 00000000000..083946f0b10 --- /dev/null +++ b/documentation/specs/src/base-ledger/core-concepts.md @@ -0,0 +1,3 @@ +# Consensus + +Namada uses [Tendermint Go](https://github.com/tendermint/tendermint) through the [tendermint-rs](https://github.com/heliaxdev/tendermint-rs) bindings in order to provide peer-to-peer transaction gossip, BFT consensus, and state machine replication for Namada's custom state machine. diff --git a/documentation/specs/src/base-ledger/execution.md b/documentation/specs/src/base-ledger/execution.md index 3395ffb858b..24b5fb992be 100644 --- a/documentation/specs/src/base-ledger/execution.md +++ b/documentation/specs/src/base-ledger/execution.md @@ -2,17 +2,20 @@ The Namada ledger execution system is based on an initial version of the [Anoma protocol](https://specs.anoma.net). The system implements a generic computational substrate with WASM-based transactions and validity predicate verification architecture, on top of which specific features of Namada such as IBC, proof-of-stake, and the MASP are built. +## Validity predicates + +Conceptually, a validity predicate (VP) is a function of the transaction's data and of the storage state prior and posterior to the transaction's execution, returning a boolean value. A transaction may modify any data in the accounts' dynamic storage sub-space. Upon transaction execution, the VPs associated with the accounts whose storage has been modified are invoked to verify the transaction. If any of them reject the transaction, all of its storage modifications are discarded. ## Namada ledger -The Namada ledger is built on top of [Tendermint](https://docs.tendermint.com/master/spec/)'s [ABCI](https://docs.tendermint.com/master/spec/abci/) interface with a slight deviation from the ABCI convention: in Namada, the transactions are currently *not* being executed in ABCI's `DeliverTx` method, but rather in the `EndBlock` method. The reason for this is to prepare for future DKG and threshold decryption integration, which has not yet been fully finished and hence is out-of-scope for the initial release version of Namada. +The Namada ledger is built on top of [Tendermint](https://docs.tendermint.com/master/spec/)'s [ABCI](https://docs.tendermint.com/master/spec/abci/) interface with a slight deviation from the ABCI convention: in Namada, the transactions are currently *not* being executed in ABCI's [`DeliverTx` method](https://docs.tendermint.com/master/spec/abci/abci.html), but rather in the [`EndBlock` method](https://docs.tendermint.com/master/spec/abci/abci.html). The reason for this is to prepare for future DKG and threshold decryption integration. The ledger features an account-based system (in which UTXO-based systems such as the MASP can be internally implemented as specific accounts), where each account has a unique address and a dynamic key-value storage sub-space. Every account in Namada is associated with exactly one validity predicate. Fungible tokens, for example, are accounts, whose rules are governed by their validity predicates. Many of the base ledger subsystems specified here are themselves just special Namada accounts too (e.g.
PoS, IBC and MASP). -Interaction with the Namada ledger are made possible via transactions (note [transaction whitelist](#transaction-and-validity-predicate-whitelist)). Please refer to the [protocol section](https://docs.anoma.network/master/specs/ledger.html#the-protocol) that specifies the transaction execution model. In Namada, transactions are allowed to perform arbitrary modifications to the storage of any account, but the transaction will be accepted and state changes applied only if all the validity predicates that were triggered by the transaction accept it. That is, the accounts whose storage sub-spaces were touched by the transaction and/or an account that was explicitly elected by the transaction as the verifier will all have their validity predicates verifying the transaction. A transaction can add any number of additional verifiers, but cannot remove the ones determined by the protocol. For example, a transparent fungible token transfer would typically trigger 3 validity predicates - those of the token, source and target addresses. +Interactions with the Namada ledger are made possible via transactions (note transaction whitelist below). In Namada, transactions are allowed to perform arbitrary modifications to the storage of any account, but the transaction will be accepted and state changes applied only if all the validity predicates that were triggered by the transaction accept it. That is, the accounts whose storage sub-spaces were touched by the transaction and/or an account that was explicitly elected by the transaction as the verifier will all have their validity predicates verifying the transaction. A transaction can add any number of additional verifiers, but cannot remove the ones determined by the protocol. For example, a transparent fungible token transfer would typically trigger 3 validity predicates - those of the token, source and target addresses. ## Supported validity predicates -Conceptually, a VP is a function from the transaction's data and the storage state prior and posterior to a transaction execution returning a boolean value. A transaction may modify any data in the accounts' dynamic storage sub-space. Upon transaction execution, the VPs associated with the accounts whose storage has been modified are invoked to verify the transaction. If any of them reject the transaction, all of its storage modifications are discarded. While the execution model is fully programmable, for Namada only a selected subset of provided validity predicates and transactions will be permitted through pre-defined whitelists configured at network launch. +While the execution model is fully programmable, for Namada only a selected subset of provided validity predicates and transactions will be permitted through pre-defined whitelists configured at network launch. There are some native VPs for internal transparent addresses that are built into the ledger. All the other VPs are implemented as WASM programs. One can build a custom VP using the [VP template](https://github.com/anoma/anoma/tree/master/wasm/vp_template) or use one of the pre-defined VPs.
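As a rough, self-contained model of the rule just described — every triggered VP must accept, or all of the transaction's storage changes are dropped — consider the sketch below. This is a conceptual illustration, not the actual WASM VP interface; all names are hypothetical:

```rust
use std::collections::{BTreeMap, BTreeSet};

type Address = String;
type Key = String;
type Storage = BTreeMap<Key, Vec<u8>>;

/// Conceptual VP: a boolean predicate over the tx data and the storage state
/// before (`pre`) and after (`post`) the transaction.
type Vp = fn(tx_data: &[u8], pre: &Storage, post: &Storage) -> bool;

/// A transaction's writes are kept only if *every* triggered VP accepts: the
/// VPs of all accounts whose sub-space was touched, plus any verifiers the
/// transaction explicitly elected.
fn accept_tx(
    vps: &BTreeMap<Address, Vp>,
    tx_data: &[u8],
    pre: &Storage,
    post: &Storage,
    touched_owners: &BTreeSet<Address>,
    elected_verifiers: &BTreeSet<Address>,
) -> bool {
    touched_owners
        .union(elected_verifiers)
        .all(|addr| match vps.get(addr) {
            Some(vp) => vp(tx_data, pre, post),
            // No VP registered for a triggered account: reject in this toy model.
            None => false,
        })
}
```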
@@ -24,7 +27,7 @@ Supported validity predicates for Namada: - SlashFund (see [spec](./governance.md#SlashFundAddress)) - Protocol parameters - WASM - - Fungible token (see [spec](./fungible-token.md)) + - Fungible token (see [spec](./core-concepts.md)) - MASP (see [spec](../masp.md)) - - Implicit account VP (see [spec](./default-account.md)) - - k-of-n multisignature VP (see [spec](./multisignature.md)) + - Implicit account VP (see [spec](./core-concepts.md)) + - k-of-n multisignature VP (see [spec](./core-concepts.md)) diff --git a/documentation/specs/src/base-ledger/governance.md b/documentation/specs/src/base-ledger/governance.md index 40c7281af96..fe1108a8327 100644 --- a/documentation/specs/src/base-ledger/governance.md +++ b/documentation/specs/src/base-ledger/governance.md @@ -1,11 +1,23 @@ # Namada Governance -Namada introduces a governance mechanism to propose and apply protocol changes with or without the need for a hard fork. Anyone holding some `NAM` will be able to propose some changes to which delegators and validators will cast their `yay` or `nay` votes. Governance on Namada supports both `signaling` and `voting` mechanisms. The difference between the the two is that the former is needed when the changes require a hard fork. In cases where the chain is not able to produce blocks anymore, Namada relies on [off chain](#off-chain-protocol) signaling to agree on a common move. +Before describing Namada governance, it is useful to define the concepts of validators, delegators, and NAM. + +Namada's economic model is based around a single native token, NAM, which is controlled by the protocol. + +A Namada validator is an account with a public consensus key, which may participate in producing blocks and governance activities. A validator may not also be a delegator. + +A Namada delegator is an account that delegates some tokens to a validator. A delegator may not also be a validator. + +Namada introduces a governance mechanism to propose and apply protocol changes with or without the need for a hard fork. Anyone holding some `NAM` will be able to propose some changes to which delegators and validators will cast their `yay` or `nay` votes; in addition, it will also be possible to attach payloads to votes, in specific cases, to embed additional information. Governance on Namada supports both `signaling` and `voting` mechanisms. The difference between the two is that the former is needed when the changes require a hard fork. In cases where the chain is not able to produce blocks anymore, Namada relies on [off chain](#off-chain-protocol) signaling to agree on a common move. + +Further information regarding delegators, validators, and NAM is contained in the [economics section](../economics.md). ## On-chain protocol ### Governance Address + Governance adds 2 internal addresses: + - `GovernanceAddress` - `SlashFundAddress` @@ -13,11 +25,13 @@ The first internal address contains all the proposals under its address space. The second internal address holds the funds of rejected proposals. ### Governance storage + Each proposal will be stored in a sub-key under the internal proposal address. The storage keys involved are: ``` /\$GovernanceAddress/proposal/\$id/content: Vec<u8> /\$GovernanceAddress/proposal/\$id/author: Address +/\$GovernanceAddress/proposal/\$id/type: ProposalType /\$GovernanceAddress/proposal/\$id/start_epoch: Epoch /\$GovernanceAddress/proposal/\$id/end_epoch: Epoch /\$GovernanceAddress/proposal/\$id/grace_epoch: Epoch @@ -26,8 +40,10 @@ Each proposal will be stored in a sub-key under the internal proposal address. T /\$GovernanceAddress/proposal/epoch/\$id: u64 ``` +An epoch is a range of blocks or time that is defined by the base ledger and made available to the PoS system. This document assumes that epochs are identified by consecutive natural numbers. All the data relevant to PoS are [associated with epochs](../economics/proof-of-stake/bonding-mechanism.md#epoched-data). + - `Author` address field will be used to credit the locked funds if the proposal is approved. -- `/\$GovernanceAddress/proposal/\$epoch/\$id` is used for easing the ledger governance execution. `\$epoch` refers to the same value as the on specific in the `grace_epoch` field. +- `/\$GovernanceAddress/proposal/\$epoch/\$id` is used for easing the ledger governance execution. `\$epoch` refers to the same value as the one specified in the `grace_epoch` field. - The `content` value should follow a standard format. We leverage a similar format to what is described in the [BIP2](https://github.com/bitcoin/bips/blob/master/bip-0002.mediawiki#bip-format-and-structure) document: ```json @@ -44,6 +60,15 @@ Each proposal will be stored in a sub-key under the internal proposal address. T } ``` +The `ProposalType` implies different combinations of: + +- the optional wasm code attached to the proposal +- which actors should be allowed to vote (delegators and validators or validators only) +- the threshold to be used in the tally process +- the optional payload (memo) attached to the vote + +The correct logic to handle these different types will be hardcoded in the protocol. We'll also rely on type checking to strictly enforce the correctness of a proposal given its type. These two approaches combined will prevent a user from deviating from the intended logic for a certain proposal type (e.g. providing a wasm code when it's not needed or allowing only validators to vote when delegators should be included as well, etc.). More details on the specific types supported can be found in the [relevant](#supported-proposal-types) section of this document. + `GovernanceAddress` parameters and global storage keys are: ``` @@ -73,33 +98,69 @@ The governance machinery also relies on a subkey stored under the `NAM` token ad This is to leverage the `NAM` VP to check that the funds were correctly locked. The governance subkey, `/\$GovernanceAddress/proposal/\$id/funds` will be used after the tally step to know the exact amount of tokens to refund or move to Treasury.
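For illustration only, the proposal keys and the locked-funds sub-key described above can be assembled as plain strings — `GOV` and `NAM` below are placeholders standing in for the actual internal addresses:

```rust
const GOV: &str = "$GovernanceAddress"; // placeholder for the real governance address
const NAM: &str = "$NAMAddress"; // placeholder for the native token address

/// Key of one field of proposal `id`, per the storage layout above.
fn proposal_field_key(id: u64, field: &str) -> String {
    format!("/{}/proposal/{}/{}", GOV, id, field)
}

/// Sub-key under the NAM token holding governance-locked funds, so the NAM
/// VP can verify that proposal funds were correctly locked.
fn governance_balance_key() -> String {
    format!("/{}/balance/{}", NAM, GOV)
}

fn main() {
    // The writes a proposal-creation tx must perform for proposal 1:
    for field in ["content", "author", "type", "start_epoch", "end_epoch", "grace_epoch", "funds"] {
        println!("{}", proposal_field_key(1, field));
    }
    println!("{}", governance_balance_key());
}
```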
+### Supported proposal types + +At the moment, Namada supports 3 types of governance proposals: + +```rust +pub enum ProposalType { + /// Carries the optional proposal code path + Custom(Option<String>), + PGFCouncil, + ETHBridge, +} +``` + +`Custom` represents a generic proposal with the following properties: + +- Can carry a wasm code to be executed in case the proposal passes +- Allows both validators and delegators to vote +- Requires 2/3 of the total voting power to succeed +- Doesn't expect any memo attached to the votes + +`PGFCouncil` is a specific proposal to elect the council for _Public Goods Funding_: + +- Doesn't carry any wasm code +- Allows both validators and delegators to vote +- Requires 1/3 of the total voting power to vote for the same council +- Expects every vote to carry a memo in the form `Set<(Set<Address>, BudgetCap)>` + +`ETHBridge` is aimed at regulating actions on the bridge such as updating the Ethereum smart contracts or withdrawing all the funds from the `Vault`: + +- Doesn't carry any wasm code +- Allows only validators to vote +- Requires 2/3 of the validators' total voting power to succeed +- Expects every vote to carry a memo in the form of a tuple `(Action, Signature)` + ### GovernanceAddress VP - -Just like Pos, also governance has his own storage space. The `GovernanceAddress` validity predicate task is to check the integrity and correctness of new proposals. A proposal, to be correct, must satisfy the following: + +Just like PoS, governance also has its own storage space. The `GovernanceAddress` validity predicate's task is to check the integrity and correctness of new proposals. A proposal, to be correct, must satisfy the following: + - Mandatory storage writes are: - - counter - - author - - funds - - voting_start epoch - - voting_end epoch - - grace_epoch + - counter + - author + - type + - funds + - voting_start epoch + - voting_end epoch + - grace_epoch - Lock some funds >= `min_proposal_fund` - Contains a unique ID - Contains a start, end and grace Epoch - The difference between StartEpoch and EndEpoch should be >= `min_proposal_period`. - Should contain a text describing the proposal with length < `max_proposal_content_size` characters. -- Vote can be done only by a delegator or validator -- Validator can vote only in the initial 2/3 of the whole proposal duration (`end_epoch` - `start_epoch`) -- Due to the previous requirement, the following must be true,`(EndEpoch - StartEpoch) % 3 == 0` -- If defined, `proposalCode` should be the wasm bytecode representation of - the changes. This code is triggered in case the proposal has a position outcome. +- Votes can be cast only by a delegator or validator (further constraints can be applied depending on the proposal type) +- If delegators are allowed to vote, then validators can vote only in the initial 2/3 of the whole proposal duration (`end_epoch` - `start_epoch`) +- Due to the previous requirement, the following must be true: `(EndEpoch - StartEpoch) % 3 == 0` +- If defined, `proposalCode` should be the wasm bytecode representation of the changes. This code is triggered in case the proposal has a positive outcome. - The difference between `grace_epoch` and `end_epoch` should be of at least `min_proposal_grace_epochs` Once a proposal has been created, nobody can modify any of its fields. -If `proposal_code` is `Empty` or `None` , the proposal upgrade will need to be done via hard fork. +If `proposal_code` is `Empty` or `None`, the proposal upgrade will need to be done via hard fork, unless this is a specific type of proposal: in this case the protocol can directly apply the required changes. -It is possible to check the actual implementation [here](https://github.com/anoma/namada/blob/master/shared/src/ledger/governance/mod.rs#L69). +It is possible to check the actual implementation [here](https://github.com/anoma/namada/blob/main/shared/src/ledger/governance/mod.rs#L69). -Example of `proposalCode` could be: +Examples of `proposalCode` could be: - storage writes to change some protocol parameter - storage writes to restore a slash - storage writes to change a non-native vp @@ -108,91 +169,109 @@ This means that corresponding VPs need to handle these cases. ### Proposal Transactions -The proposal transaction will have the following structure, where `author` address will be the refund address.
+The on-chain proposal transaction will have the following structure, where the `author` address will be the refund address. ```rust -struct OnChainProposal { - id: u64 - content: Vec<u8> - author: Address - votingStartEpoch: Epoch - votingEndEpoch: Epoch - graceEpoch: Epoch - proposalCode: Option<Vec<u8>> +struct Proposal { + id: u64, + content: Vec<u8>, + author: Address, + r#type: ProposalType, + votingStartEpoch: Epoch, + votingEndEpoch: Epoch, + graceEpoch: Epoch, } ``` +The optional proposal wasm code will be embedded inside the `ProposalType` enum variants to better perform validation through type checking. + ### Vote transaction Vote transactions have the following structure: ```rust struct OnChainVote { - id: u64 - voter: Address - yay: bool + id: u64, + voter: Address, + yay: ProposalVote, } ``` Vote transaction creates or modifies the following storage key: ``` -/\$GovernanceAddress/proposal/\$id/vote/\$delegation_address/\$voter_address: Enum(yay|nay) +/\$GovernanceAddress/proposal/\$id/vote/\$delegation_address/\$voter_address: ProposalVote ``` -The storage key will only be created if the transaction is signed either by -a validator or a delegator. -Validators will be able to vote only for 2/3 of the total voting period, while delegators can vote until the end of the voting period. +where `ProposalVote` is an enum representing a `Yay` or `Nay` vote: the yay variant also contains the specific memo (if any) required for that proposal. + +The storage key will only be created if the transaction is signed either by a validator or a delegator. In case a vote misses a required memo or carries a memo with an invalid format, the vote will be discarded at validation time (VP) and it won't be written to storage. -If a delegator votes opposite to its validator, this will *override* the -corresponding vote of this validator (e.g. if a delegator has a voting power of 200 and votes opposite to the delegator holding these tokens, than 200 will be subtracted from the voting power of the involved validator). +If delegators are allowed to vote, validators will be able to vote only for 2/3 of the total voting period, while delegators can vote until the end of the voting period. + +If a delegator votes differently than its validator, this will *override* the corresponding vote of this validator (e.g. if a delegator has a voting power of 200 and votes opposite to the validator holding these tokens, then 200 will be subtracted from the voting power of the involved validator). As a small form of space/gas optimization, if a delegator votes accordingly to its validator, the vote will not actually be submitted to the chain. This logic is applied only if the following conditions are satisfied: - The transaction is not being forced -- The vote is submitted in the last third of the voting period (the one exclusive to delegators). This second condition is necessary to prevent a validator from changing its vote after a delegator vote has been submitted, effectively stealing the delegator's vote. +- The vote is submitted in the last third of the voting period (the one exclusive to delegators). This second condition is necessary to prevent a validator from changing its vote after a delegator vote has been submitted, effectively stealing the delegator's vote. ### Tally -At the beginning of each new epoch (and only then), in the `FinalizeBlock` event, tallying will occur for all the proposals ending at this epoch (specified via the `grace_epoch` field of the proposal).
-The proposal has a positive outcome if 2/3 of the staked `NAM` total is voting `yay`. Tallying is computed with the following rules: -- Sum all the voting power of validators that voted `yay` -- For any validator that voted `yay`, subtract the voting power of any delegation that voted `nay` -- Add voting power for any delegation that voted `yay` (whose corresponding validator didn't vote `yay`) -- If the aformentioned sum divided by the total voting power is >= `2/3`, the proposal outcome is positive otherwise negative. +At the beginning of each new epoch (and only then), in the `finalize_block` function, tallying will occur for all the proposals ending at this epoch (specified via the `grace_epoch` field of the proposal). +The proposal has a positive outcome if the threshold specified by the `ProposalType` is reached. This means that enough `yay` votes must have been collected: the threshold is relative to the staked `NAM` total. + +Tallying, when no `memo` is required, is computed with the following rules (a sketch in code follows the off-chain protocol section below): + +1. Sum all the voting power of validators that voted `yay` +2. For any validator that voted `yay`, subtract the voting power of any delegation that voted `nay` +3. Add voting power for any delegation that voted `yay` (whose corresponding validator didn't vote `yay`) +4. If the aforementioned sum divided by the total voting power is greater than or equal to the threshold set by `ProposalType`, the proposal outcome is positive, otherwise negative. + +If votes carry a `memo`, instead, the `yay` votes must be evaluated per memo. The protocol will implement the correct logic to make sense of these memos and compute the tally correctly: + +1. Sum all the voting power of validators that voted `yay` with a specific memo, effectively splitting the `yay` votes into different subgroups +2. For any validator that voted `yay`, subtract the voting power of any delegation that voted `nay` or voted `yay` with a different memo +3. Add voting power for any delegation that voted `yay` (whose corresponding validator voted `nay` or `yay` with a different memo) +4. From the `yay` subgroups select the one that got the greatest amount of voting power +5. If the aforementioned voting power divided by the total voting power is greater than or equal to the threshold set by `ProposalType`, the proposal outcome is positive, otherwise negative. -All the computation above must be made at the epoch specified in the `start_epoch` field of the proposal. +All the computation will be done on data collected at the epoch specified in the `end_epoch` field of the proposal. -It is possible to check the actual implementation [here](https://github.com/anoma/namada/blob/master/shared/src/ledger/governance/utils.rs#L68). +It is possible to check the actual implementation [here](https://github.com/anoma/namada/blob/main/shared/src/ledger/governance/utils.rs#L68). ### Refund and Proposal Execution mechanism -Together with tallying, in the first block at the beginning of each epoch, in the `FinalizeBlock` event, the protocol will manage the execution of accepted proposals and refunding. For each ended proposal with a positive outcome, it will refund the locked funds from `GovernanceAddress` to the proposal author address (specified in the proposal `author` field). For each proposal that has been rejected, instead, the locked funds will be moved to the `SlashFundAddress`. Moreover, if the proposal had a positive outcome and `proposal_code` is defined, these changes will be executed right away.
-To summarize the execution of governance in the `FinalizeBlock` event: If the proposal outcome is positive and current epoch is equal to the proposal `grace_epoch`, in the `FinalizeBlock` event: +Together with tallying, in the first block at the beginning of each epoch, in the `finalize_block` function, the protocol will manage the execution of accepted proposals and refunding. For each ended proposal with a positive outcome, it will refund the locked funds from `GovernanceAddress` to the proposal author address (specified in the proposal `author` field). For each proposal that has been rejected, instead, the locked funds will be moved to the `SlashFundAddress`. Moreover, if the proposal had a positive outcome and `proposal_code` is defined, these changes will be executed right away. +To summarize the execution of governance in the `finalize_block` function: + +If the proposal outcome is positive and the current epoch is equal to the proposal `grace_epoch`, in the `finalize_block` function: - transfer the locked funds to the proposal `author` - execute any changes specified by `proposal_code` -In case the proposal was rejected or if any error, in the `FinalizeBlock` event: +In case the proposal was rejected or any error occurred, in the `finalize_block` function: - transfer the locked funds to `SlashFundAddress` The result is then signaled by creating and inserting a [`Tendermint Event`](https://github.com/tendermint/tendermint/blob/ab0835463f1f89dcadf83f9492e98d85583b0e71/docs/spec/abci/abci.md#events). - ## SlashFundAddress + Funds locked in `SlashFundAddress` address should be spendable only by proposals. ### SlashFundAddress storage + ``` /\$SlashFundAddress/?: Vec<u8> ``` The funds will be stored under: + ``` /\$NAMAddress/balance/\$SlashFundAddress: u64 ``` ### SlashFundAddress VP + The slash_fund validity predicate will approve a transfer only if the transfer has been made by the protocol (by checking the existence of the `/\$GovernanceAddress/pending/\$proposal_id` storage key). It is possible to check the actual implementation [here](https://github.com/anoma/namada/blob/main/shared/src/ledger/slash_fund/mod.rs#L70). @@ -200,8 +279,10 @@ It is possible to check the actual implementation [here](https://github.com/anom ## Off-chain protocol ### Create proposal + +A CLI command to create a signed JSON representation of the proposal. The JSON will have the following structure: + ``` { content: Base64<Vec<u8>>, author: Address, votingStart: TimeStamp, votingEnd: TimeStamp, signature: Base64<Vec<u8>> } ``` -The signature is produced over the hash of the concatenation of: `content`, `author`, `votingStart` and `votingEnd`. +The signature is produced over the hash of the concatenation of: `content`, `author`, `votingStart` and `votingEnd`. Proposal types are not supported off-chain. ### Create vote -A CLI command to create a signed JSON representation of a vote. The JSON +A CLI command to create a signed JSON representation of a vote. The JSON will have the following structure: + ``` { proposalHash: Base64<Vec<u8>>, @@ -227,9 +309,10 @@ will have the following structure: } ``` -The proposalHash is produced over the concatenation of: `content`, `author`, `votingStart`, `votingEnd`, `voter` and `vote`. +The proposalHash is produced over the concatenation of: `content`, `author`, `votingStart`, `votingEnd`, `voter` and `vote`. Vote memos are not supported off-chain.
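Read together with the no-memo tally rules above, the following is a simplified sketch of that computation — hypothetical types only, not the actual `compute_tally` implementation:

```rust
use std::collections::BTreeMap;

struct Validator { power: u64, voted_yay: bool }
struct Delegation { validator: String, power: u64, vote: Option<bool> } // Some(true) = yay

/// Sketch of the no-memo tally: yay validator power, minus nay delegations
/// under yay validators, plus yay delegations under non-yay validators,
/// compared against a `num/den` threshold of the total voting power.
fn is_accepted(
    validators: &BTreeMap<String, Validator>,
    delegations: &[Delegation],
    total_power: u64,
    threshold_num: u64, // e.g. 2 and 3 for a 2/3 threshold
    threshold_den: u64,
) -> bool {
    let mut yay: u64 = validators.values().filter(|v| v.voted_yay).map(|v| v.power).sum();
    for d in delegations {
        let validator_yay = validators.get(&d.validator).map(|v| v.voted_yay);
        match (d.vote, validator_yay) {
            // Delegation voted nay while its validator voted yay: subtract.
            (Some(false), Some(true)) => yay = yay.saturating_sub(d.power),
            // Delegation voted yay while its validator didn't: add.
            (Some(true), Some(false)) | (Some(true), None) => yay += d.power,
            _ => {}
        }
    }
    // Accept iff yay power / total power >= threshold (integer arithmetic).
    yay * threshold_den >= total_power * threshold_num
}
```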
### Tally + Same mechanism as the [on chain](#tally) tally, but instead of reading the data from storage it requires a list of serialized JSON votes. ## Interfaces diff --git a/documentation/specs/src/economics.md index bf7346b63a4..f18c7d70233 100644 --- a/documentation/specs/src/economics.md +++ b/documentation/specs/src/economics.md @@ -1,3 +1,3 @@ ## Economics -Namada's economic model is based around a single native token, NAM, which is controlled by the protocol. Users pay transaction fees in NAM and other tokens (see [fee system](./economics/fee-system.md)), so demand for NAM can be expected to track demand for block space. On the supply side, the protocol mints NAM at a fixed maximum per-annum rate based on a fraction of the current supply (see [inflation system](./economics/inflation-system.md)), which is directed to three areas of protocol subsidy: [proof-of-stake](./economics/proof-of-stake.md), [shielded pool incentives](./economics/shielded-pool-incentives.md), and [public-goods funding](./economics/public-goods-funding.md). Inflation rates for these three areas are adjusted independently (the first two on PD controllers and the third based on funding decisions) and excess tokens are slowly burned. \ No newline at end of file +Namada users pay transaction fees in NAM and other tokens (see [fee system](./economics/fee-system.md) and [governance](./base-ledger/governance.md)), so demand for NAM can be expected to track demand for block space. On the supply side, the protocol mints NAM at a fixed maximum per-annum rate based on a fraction of the current supply (see [inflation system](./economics/inflation-system.md)), which is directed to three areas of protocol subsidy: [proof-of-stake](./economics/proof-of-stake.md), [shielded pool incentives](./economics/shielded-pool-incentives.md), and [public-goods funding](./economics/public-goods-funding.md). Inflation rates for these three areas are adjusted independently (the first two on PD controllers and the third based on funding decisions) and excess tokens are slowly burned. \ No newline at end of file diff --git a/documentation/specs/src/economics/proof-of-stake.md index 808f02e585b..3102616e655 100644 --- a/documentation/specs/src/economics/proof-of-stake.md +++ b/documentation/specs/src/economics/proof-of-stake.md @@ -4,7 +4,7 @@ This section of the specification describes the proof-of-stake mechanism of Nama This section is split into three subcomponents: the [bonding mechanism](./proof-of-stake/bonding-mechanism.md), [reward distribution](./proof-of-stake/reward-distribution.md), and [cubic slashing](./proof-of-stake/cubic-slashing.md). -## Introduction +## Context Blockchain systems rely on economic security (directly or indirectly) to prevent @@ -13,7 +13,7 @@ for actors to behave according to protocol. The aim is that economic incentives promote correct long-term operation of the system and economic punishments discourage diverging from correct protocol execution either by mistake or -with the intent of carrying out attacks. Many PoS blockcains rely on the 1/3 Byzantine rule, where they make the assumption the adversary cannot control more 2/3 of the total stake or 2/3 of the actors. +with the intent of carrying out attacks. Many PoS blockchains rely on the 1/3 Byzantine rule, where they make the assumption that the adversary cannot control more than 1/3 of the total stake or 1/3 of the actors.
## Goals of Rewards and Slashing: Liveness and Security diff --git a/documentation/specs/src/economics/proof-of-stake/bonding-mechanism.md index ab0449e026e..349e766defe 100644 --- a/documentation/specs/src/economics/proof-of-stake/bonding-mechanism.md +++ b/documentation/specs/src/economics/proof-of-stake/bonding-mechanism.md @@ -1,9 +1,5 @@ # Bonding mechanism -## Epoch - -An epoch is a range of blocks or time that is defined by the base ledger and made available to the PoS system. This document assumes that epochs are identified by consecutive natural numbers. All the data relevant to PoS are [associated with epochs](#epoched-data). - ### Epoched data Epoched data is data associated with a specific epoch that is set in advance. @@ -16,16 +12,11 @@ The data relevant to the PoS system in the ledger's state are epoched. Each data Changes to the epoched data do not take effect immediately. Instead, changes in epoch `n` are queued to take effect in epoch `n + pipeline_length` in most cases and `n + unbonding_length` for [unbonding](#unbond) actions. Should the same validator's data or the same bonds (i.e. with the same identity) be updated more than once in the same epoch, the later update overrides the previously queued-up update. For bonds, the token amounts are added up. Once epoch `n` has ended, the queued-up updates for epoch `n + pipeline_length` are final and the values become immutable. -## Entities - -- [Validator](#validator): An account with a public consensus key, which may participate in producing blocks and governance activities. A validator may not also be a delegator. -- [Delegator](#delegator): An account that delegates some tokens to a validator. A delegator may not also be a validator. - Additionally, any account may submit evidence for [a slashable misbehaviour](#slashing). ### Validator -A validator must have a public consensus key. Additionally, it may also specify optional metadata fields (TBA). +A validator must have a public consensus key. A validator may be in one of the following states: - *inactive*: @@ -115,7 +106,7 @@ Once an offence has been reported: - [cubic slashing](./cubic-slashing.md): escalated slashing -Instead of absolute values, validators' total bonded token amounts and bonds' and unbonds' token amounts are stored as their deltas (i.e. the change of quantity from a previous epoch) to allow distinguishing changes for different epoch, which is essential for determining whether tokens should be slashed. However, because slashes for a fault that occurred in epoch `n` may only be applied before the beginning of epoch `n + unbonding_length`, in epoch `m` we can sum all the deltas of total bonded token amounts and bonds and unbond with the same source and validator for epoch equal or less than `m - unboding_length` into a single total bonded token amount, single bond and single unbond record. This is to keep the total number of total bonded token amounts for a unique validator and bonds and unbonds for a unique pair of source and validator bound to a maximum number (equal to `unbonding_length`). +Instead of absolute values, validators' total bonded token amounts and bonds' and unbonds' token amounts are stored as their deltas (i.e. the change of quantity from a previous epoch) to allow distinguishing changes for different epochs, which is essential for determining whether tokens should be slashed.
Slashes for a fault that occurred in epoch `n` may only be applied before the beginning of epoch `n + unbonding_length`. For this reason, in epoch `m` we can sum all the deltas of total bonded token amounts, bonds and unbonds with the same source and validator for epochs equal to or less than `m - unbonding_length` into a single total bonded token amount, a single bond and a single unbond record. This is to keep the total number of total bonded token amounts for a unique validator and bonds and unbonds for a unique pair of source and validator bound to a maximum number (equal to `unbonding_length`). To disincentivize validator misbehaviour in the PoS system, a validator may be slashed for any fault it has committed. Evidence of misbehaviour may be submitted by any account for a fault that occurred in epoch `n` anytime before the beginning of epoch `n + unbonding_length`. @@ -123,6 +114,11 @@ A valid evidence reduces the validator's total bonded token amount by the slash The invariant is that the sum of amounts that may be withdrawn from a misbehaving validator must always add up to the total bonded token amount. +## Initialization + +An initial validator set with self-bonded token amounts must be given on system initialization. + +This set is used to pre-compute epochs in the genesis block from epoch `0` to epoch `pipeline_length - 1`. ## System parameters @@ -296,8 +292,4 @@ struct Slash { } ``` -## Initialization - -An initial validator set with self-bonded token amounts must be given on system initialization. -This set is used to pre-compute epochs in the genesis block from epoch `0` to epoch `pipeline_length - 1`. diff --git a/documentation/specs/src/economics/proof-of-stake/cubic-slashing.md index 4d4ac075230..d63347308ef 100644 --- a/documentation/specs/src/economics/proof-of-stake/cubic-slashing.md +++ b/documentation/specs/src/economics/proof-of-stake/cubic-slashing.md @@ -31,4 +31,16 @@ calculateSlashRate slashes = Validators can later submit a transaction to unjail themselves after a configurable period. When the transaction is applied and accepted, the validator updates its state to "candidate" and is added back to the validator set starting at the epoch at pipeline offset (active or inactive, depending on its voting power). -At present, funds slashed are sent to the governance treasury. In the future we could potentially reward the slash discoverer with part of the slash, for which some sort of commit-reveal mechanism will be required to prevent front-running. +At present, funds slashed are sent to the governance treasury. + +## Slashes + +Slashes should lead to punishment for delegators who were contributing voting power to the validator at the height of the infraction, _as if_ the delegations were iterated over and slashed individually. + +This can be implemented as a negative inflation rate for a particular block. + +Instant redelegation is not supported. Redelegations must wait the unbonding period.
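The delta-compaction rule described above can be sketched in a few lines. This is a hedged illustration with a hypothetical `BTreeMap`-based representation, not the storage layout the PoS crate actually uses:

```rust
use std::collections::BTreeMap;

/// Hypothetical epoched deltas for one (source, validator) pair:
/// epoch -> signed change in bonded tokens.
type Deltas = BTreeMap<u64, i128>;

/// In epoch `m`, fold every delta for epochs <= `m - unbonding_length`
/// into a single record at the cutoff epoch, so that at most
/// `unbonding_length` distinct entries remain per key afterwards.
fn compact_deltas(deltas: &mut Deltas, m: u64, unbonding_length: u64) {
    let cutoff = match m.checked_sub(unbonding_length) {
        Some(c) => c,
        None => return, // nothing is old enough to compact yet
    };
    // Sum every delta at or before the cutoff...
    let folded: i128 = deltas.range(..=cutoff).map(|(_, d)| *d).sum();
    // ...drop those entries, keeping only the newer ones...
    *deltas = deltas.split_off(&(cutoff + 1));
    // ...and re-insert the folded total as a single record.
    if folded != 0 {
        deltas.insert(cutoff, folded);
    }
}
```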
+ + \ No newline at end of file diff --git a/documentation/specs/src/economics/proof-of-stake/reward-distribution.md index 70f662f97a4..730e423e767 100644 --- a/documentation/specs/src/economics/proof-of-stake/reward-distribution.md +++ b/documentation/specs/src/economics/proof-of-stake/reward-distribution.md @@ -152,16 +152,5 @@ The commission rate $c_V(e)$ is the same for all delegations to a validator $V$ While rewards are given out at the end of every epoch, voting power is only updated after the pipeline offset. According to the [proof-of-stake system](bonding-mechanism.md#epoched-data), at the current epoch `e`, the validator sets can only be updated for epoch `e + pipeline_offset`, and they should remain unchanged from epoch `e` to `e + pipeline_offset - 1`. Updating voting power in the current epoch would violate this rule. -## Slashes - -Slashes should lead to punishment for delegators who were contributing voting power to the validator at the height of the infraction, _as if_ the delegations were iterated over and slashed individually. - -This can be implemented as a negative inflation rate for a particular block. - -Instant redelegation is not supported. Redelegations must wait the unbonding period. - - diff --git a/documentation/specs/src/economics/public-goods-funding.md index 96e82dfc93a..bfb779b1fa2 100644 --- a/documentation/specs/src/economics/public-goods-funding.md +++ b/documentation/specs/src/economics/public-goods-funding.md @@ -10,7 +10,7 @@ There is a lot of existing research into public-goods funding to which justice c Namada instantiates a dual proactive/retroactive public-goods funding model, stewarded by a public-goods council elected by limited liquid democracy. -This proposal requires the following protocol components: +This requires the following protocol components: - Limited liquid democracy / targeted delegation: Namada's current voting mechanism is altered to add targeted delegation. By default, each delegator delegates their vote in governance to their validator, but they can set an alternative governance delegate who can instead vote on their behalf (but whose vote can be overridden as usual). Validators can also set governance delegates, in which case those delegates can vote on their behalf, and on the behalf of all delegators to that validator who do not override the vote, unless the validator overrides the vote. This is a limited form of liquid democracy which could be extended in the future. - Funding council: bi-annually (every six months), Namada governance elects a public goods funding council by stake-weighted approval vote (see below). Public goods funding councils run as groups. The public goods funding council decides according to internal decision-making procedures (practically probably limited to a k-of-n multisignature) how to allocate continuous funding and retroactive funding during their term. Namada genesis includes an initial funding council, and the next election will occur six months after launch. - Continuous funding: Namada prints an amount of inflation fixed on a percentage basis dedicated to continuous funding. Each quarter, the public goods funding council selects recipients and amounts (which in total must receive all of the funds, although they could burn some) and submits this list to the protocol. Inflation is distributed continuously by the protocol to these recipients during that quarter.
diff --git a/documentation/specs/src/further-reading.md index 971007c5307..d92add791f3 100644 --- a/documentation/specs/src/further-reading.md +++ b/documentation/specs/src/further-reading.md @@ -1,3 +1,15 @@ ## Further reading -Thanks for reading! You can find further information about the state of Namada at [namada.net](https://namada.net). \ No newline at end of file +Thanks for reading! You can find further information about the project below: + +- [The state of Namada](https://namada.net) +- [The state of Anoma](https://anoma.net/) +- [Anoma source code](https://github.com/anoma/anoma) +- [Namada source code](https://github.com/anoma/namada) +- [Anoma Community](https://anoma.net/community) +- [Heliax](https://heliax.dev/) +- [Anoma Medium page](https://medium.com/anomanetwork) +- [Namada Docs](https://docs.namada.net/) +- [Anoma Discord](https://discord.com/invite/anoma) +- [Namada Twitter](https://twitter.com/namadanetwork) +- [Anoma Twitter](https://twitter.com/anomanetwork) \ No newline at end of file diff --git a/documentation/specs/src/interoperability/ethereum-bridge.md index 4d626eb3f34..74d44429dfc 100644 --- a/documentation/specs/src/interoperability/ethereum-bridge.md +++ b/documentation/specs/src/interoperability/ethereum-bridge.md @@ -36,6 +36,3 @@ Ethereum side. - [ICS20](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer) - [Rainbow Bridge contracts](https://github.com/aurora-is-near/rainbow-bridge/tree/master/contracts) - [IBC in Solidity](https://github.com/hyperledger-labs/yui-ibc-solidity) - -Operational notes: -1. We will bundle the Ethereum full node with the `namada` daemon executable. diff --git a/documentation/specs/src/interoperability/ibc.md index 198266dd93b..0848243d339 100644 --- a/documentation/specs/src/interoperability/ibc.md +++ b/documentation/specs/src/interoperability/ibc.md @@ -6,29 +6,27 @@ ## IBC transaction An IBC transaction [`tx_ibc.wasm`](https://github.com/anoma/anoma/blob/fd4b7ab36929f47369ae82c82966891cb0ccc625/wasm/wasm_source/src/lib.rs#L224-L233) is provided. We have to set an IBC message as the transaction data in order to execute an IBC operation. -The transaction decodes the data to an IBC message and handles IBC-related data, e.g. it makes a new connection ID and writes a new connection end for `MsgConnectionOpenTry`. The operations are implemented in [`IbcActions`](https://docs.anoma.network/master/rustdoc/anoma/ledger/ibc/handler/trait.IbcActions.html).The transaction doesn't check the validity for the state changes. IBC validity predicate is in charge of the validity. +The transaction decodes the data to an IBC message and handles IBC-related data, e.g. it makes a new connection ID and writes a new connection end for `MsgConnectionOpenTry`. The operations are implemented in [`IbcActions`](https://github.com/anoma/anoma/blob/50b5e77f04a9afc036656353335bd232fcdba8a7/vm_env/src/ibc.rs). The transaction doesn't check the validity of the state changes; the IBC validity predicate is in charge of validity. ## IBC validity predicate -[IBC validity predicate](https://docs.anoma.network/master/rustdoc/anoma/ledger/ibc/vp/struct.Ibc.html#impl-NativeVp) checks if an IBC-related transaction satisfies IBC protocol. When an IBC-related transaction is executed, i.e.
a transaction changes the state of the key that contains [`InternalAddress::Ibc`](https://docs.anoma.network/master/rustdoc/anoma/types/address/enum.InternalAddress.html#variant.Ibc), IBC validity predicate (one of the native validity predicates) is executed. For example, if an IBC connection end is created in the transaction, IBC validity predicate validates the creation. If the creation with `MsgConnectionOpenTry` is invalid, e.g. the counterpart connection end doesn't exist, the validity predicate makes the transaction fail. +[IBC validity predicate](https://docs.anoma.network/master/rustdoc/anoma/ledger/ibc/vp/struct.Ibc.html#impl-NativeVp) checks if an IBC-related transaction satisfies the IBC protocol. When an IBC-related transaction is executed, i.e. a transaction changes the state of the key that contains [`InternalAddress::Ibc`](https://github.com/anoma/anoma/blob/50b5e77f04a9afc036656353335bd232fcdba8a7/shared/src/types/address.rs), the IBC validity predicate (one of the native validity predicates) is executed. For example, if an IBC connection end is created in the transaction, the IBC validity predicate validates the creation. If the creation with `MsgConnectionOpenTry` is invalid, e.g. the counterpart connection end doesn't exist, the validity predicate makes the transaction fail. ## Fungible Token Transfer The transfer of fungible tokens over an IBC channel on separate chains is defined in [ICS20](https://github.com/cosmos/ibc/blob/master/spec/app/ics-020-fungible-token-transfer/README.md). -In Anoma, the sending tokens is triggered by a transaction having [MsgTransfer](https://github.com/informalsystems/ibc-rs/blob/0a952b295dbcf67bcabb79ce57ce92c9c8d7e5c6/modules/src/applications/ics20_fungible_token_transfer/msgs/transfer.rs#L20-L37) as transaction data. A packet including [`FungibleTokenPacketData`](https://docs.anoma.network/master/rustdoc/anoma/types/ibc/data/struct.FungibleTokenPacketData.html) is made from the message in the transaction execution. +In Anoma, sending tokens is triggered by a transaction having [MsgTransfer](https://github.com/informalsystems/ibc-rs/blob/0a952b295dbcf67bcabb79ce57ce92c9c8d7e5c6/modules/src/applications/ics20_fungible_token_transfer/msgs/transfer.rs#L20-L37) as transaction data. A packet including [`FungibleTokenPacketData`](https://github.com/anoma/anoma/blob/50b5e77f04a9afc036656353335bd232fcdba8a7/shared/src/types/ibc/data.rs) is made from the message in the transaction execution. An Anoma chain receives the tokens via a transaction having [MsgRecvPacket](https://github.com/informalsystems/ibc-rs/blob/0a952b295dbcf67bcabb79ce57ce92c9c8d7e5c6/modules/src/core/ics04_channel/msgs/recv_packet.rs#L19-L23), which has the packet including `FungibleTokenPacketData`. The sending and receiving of tokens in a transaction are validated by not only -IBC validity predicate but also [IBC token validity predicate](https://docs. anoma.network/master/rustdoc/anoma/ledger/ibc/vp/struct.IbcToken. html#impl-NativeVp). IBC validity predicate validates if sending and receiving the packet is proper. IBC token validity predicate is also one of the native validity predicates and checks if the token transfer is valid. If the transfer is not valid, e.g. an unexpected amount is minted, the validity predicate makes the transaction fail. +the IBC validity predicate but also the [IBC token validity predicate](https://github.com/anoma/anoma/blob/50b5e77f04a9afc036656353335bd232fcdba8a7/shared/src/ledger/ibc/vp/token.rs).
The IBC validity predicate validates whether sending and receiving the packet is proper. The IBC token validity predicate is also one of the native validity predicates and checks if the token transfer is valid. If the transfer is not valid, e.g. an unexpected amount is minted, the validity predicate makes the transaction fail. A transaction escrowing/unescrowing a token changes the escrow account's balance of the token. The key is `{token_addr}/balance/{escrow_addr}`. A transaction burning a token changes the burn account's balance of the token. The key is `{token_addr}/balance/BURN_ADDR`. A transaction minting a token changes the mint account's balance of the token. The key is `{token_addr} -/balance/MINT_ADDR`. `{escrow_addr}`, `{BURN_ADDR}`, and `{MINT_ADDR}` are addresses of [`InternalAddress`](https://docs.anoma.network/master/rustdoc/anoma/types/address/enum.InternalAddress.html). When these addresses are included in the changed keys after transaction execution, IBC token validity predicate is executed. +/balance/MINT_ADDR`. `{escrow_addr}`, `{BURN_ADDR}`, and `{MINT_ADDR}` are addresses of [`InternalAddress`](https://github.com/anoma/anoma/blob/50b5e77f04a9afc036656353335bd232fcdba8a7/shared/src/types/address.rs). When these addresses are included in the changed keys after transaction execution, the IBC token validity predicate is executed. ## IBC message diff --git a/documentation/specs/src/introduction.md index 6f4971e049b..0d4f83fd33a 100644 --- a/documentation/specs/src/introduction.md +++ b/documentation/specs/src/introduction.md @@ -2,31 +2,28 @@ Welcome to the Namada specifications! +## What is Namada? + Namada is a sovereign proof-of-stake blockchain, using Tendermint BFT consensus, that enables multi-asset private transfers for any native or non-native asset -using a multi-asset shielded pool derived from the Sapling circuit. Namada features -full IBC protocol support, a natively integrated Ethereum bridge, a modern proof-of-stake -system with automatic reward compounding and cubic slashing, a stake-weighted governance -signalling mechanism, and a proactive/retroactive public goods funding system. -Users of shielded transfers are rewarded for their contributions -to the privacy set in the form of native protocol tokens. A multi-asset shielded transfer wallet -is provided in order to facilitate safe and private user interaction with the protocol. +using a [multi-asset shielded pool](https://research.metastate.dev/multi-asset_shielded_pool/) derived from the [Sapling circuit](https://z.cash/upgrade/sapling/). Namada features full IBC protocol support, a natively integrated Ethereum bridge, a modern proof-of-stake system with automatic reward compounding and cubic slashing, a stake-weighted governance signalling mechanism, and a proactive/retroactive public goods funding system. Users of shielded transfers are rewarded for their contributions to the privacy set in the form of native protocol tokens. A multi-asset shielded transfer wallet is provided in order to facilitate safe and private user interaction with the protocol. + +You can learn more about Namada [here](https://medium.com/anomanetwork/introducing-namada-shielded-transfers-with-any-assets-dce2e579384c). +### What is Anoma? -### How does Namada relate to Anoma? +The Anoma protocol is designed to facilitate the operation of networked fractal instances, which intercommunicate but can utilise varied state machines and security models.
+A fractal instance is an instance of the Anoma consensus and execution protocols operated by a set of networked validators. Anoma’s fractal instance architecture is an attempt to build a platform that is architecturally homogeneous but has a heterogeneous security model. Thus, different fractal instances may specialise in different tasks and serve different communities. -Namada is the first fractal instance launched as part of the Anoma ecosystem. ### How does Namada relate to Anoma? -The Anoma protocol is designed to facilitate the operation of networked fractal instances, -which intercommunicate but can utilise varied state machines and security models. Different -fractal instances may specialise in different tasks and serve different communities. The Namada -instance will be the first such fractal instance, and it will be focused exclusively on the use-case of private asset transfers. +The Namada instance will be the first such fractal instance, and it will be focused exclusively on the use-case of private asset transfers. Namada is a helpful stepping stone to finalise, test, and launch a protocol version that is simpler than the full +Anoma protocol but still encapsulates a unified and useful set of features. ### Raison d'être -Safe and user-friendly multi-asset privacy doesn't yet exist in the blockchain ecosystem. -Up until now users have had the choice of either a sovereign chain that reissues assets (e.g. Zcash) -or a privacy preserving solution built on an existing smart contract chain (e.g. Tornado Cash on -Ethereum). Both have large trade-offs: in the former case, users don't have +Privacy should be default and inherent in the systems we use for transacting. Yet safe and user-friendly multi-asset privacy doesn't yet exist in the blockchain ecosystem. +Up until now users have had the choice of either a sovereign chain that reissues assets (e.g. [Zcash](https://z.cash/)) +or a privacy-preserving solution built on an existing smart contract chain. Both have large trade-offs: in the former case, users don't have assets that they actually want to transact with, and in the latter case, the restrictions of existing platforms mean that users leak a ton of metadata and the protocols are expensive and clunky to use.
### Layout of this specification diff --git a/documentation/specs/src/masp.md index 4e1fadc0878..d1a87e9b8b2 100644 --- a/documentation/specs/src/masp.md +++ b/documentation/specs/src/masp.md @@ -7,4 +7,5 @@ See the following documents: - [Ledger integration](./masp/ledger-integration.md) - [Asset type schema](./masp/asset-type.md) - [Burn and mint](./masp/burn-and-mint.md) -- [Convert circuit](./masp/convert-circuit.md) \ No newline at end of file +- [Convert circuit](./masp/convert-circuit.md) +- [Shielded pool incentives](./economics/shielded-pool-incentives.md) \ No newline at end of file diff --git a/documentation/specs/src/masp/asset-type.md index 7504c7ef0f8..bc6c701698a 100644 --- a/documentation/specs/src/masp/asset-type.md +++ b/documentation/specs/src/masp/asset-type.md @@ -2,22 +2,22 @@ MASP notes carry balances that are some positive integer amount of an asset type. Per both the MASP specification and the implementation, the -asset *identifier* is an 32-byte Blake2s hash of an arbitrary asset +asset *identifier* is a 32-byte [Blake2s hash](https://www.blake2.net/) of an arbitrary asset *name* string, although the full 32-byte space is not used because the identifier must itself hash to an elliptic curve point (currently guaranteed by incrementing a nonce until the hash is a curve point). The final curve point is the asset *type* proper, used in computations. The following is a schema for the arbitrary asset name string intended -to support various uses; at least fungible tokens and NFTs, but possibly -others. +to support various uses: currently fungible tokens and NFTs, but possibly +others in future. The asset name string is built up from a number of segments, joined by a separator. We use `/` as the separator. Segments may be one of the following: -- **Controlling address** segment: an Anoma address which controls the +- **Controlling address** segment: a Namada address which controls the asset. For example, this is the fungible token address for a fungible token. This segment must be present, and must be first; it should in theory be an error to transparently transact in assets of this type diff --git a/documentation/specs/src/masp/burn-and-mint.md index f66b8e28eba..2c9bb6e3b5c 100644 --- a/documentation/specs/src/masp/burn-and-mint.md +++ b/documentation/specs/src/masp/burn-and-mint.md @@ -62,7 +62,7 @@ It is also critical not to allow cycles. For example, if $\{(A_1, -1), (A_2, 2)\ It may theoretically be possible to implement similar mechanisms with only the existing Spend and Output circuits. For example, a Merkle tree of many Notes could be created with asset generator $[-1] vb_1 + vb_2$ and many different values, allowing anyone to Spend these public Notes, which will only balance if proper amounts of asset type 1 are Spent and asset type 2 are Output. -However, the Nullifier integrity check of the Spend circuit reveals the nullifier of each of these Notes, which removes the privacy of the conversion as the public nullifier is linkable to the allowed conversion. In addition, each Note has a fixed value, preventing arbitrary value conversions. +However, the Nullifier integrity check of the Spend circuit reveals the nullifier of each of these Notes. This removes the privacy of the conversion as the public nullifier is linkable to the allowed conversion. In addition, each Note has a fixed value, preventing arbitrary value conversions.
## Conclusion diff --git a/documentation/specs/src/masp/ledger-integration.md index 0eed1d7d805..0f4f0cabd84 100644 --- a/documentation/specs/src/masp/ledger-integration.md +++ b/documentation/specs/src/masp/ledger-integration.md @@ -4,32 +4,31 @@ The overall aim of this integration is to have the ability to provide a multi-asset shielded pool following the MASP spec as an account on the -current Anoma blockchain implementation. +current Namada blockchain implementation. -## Shielded pool VP +## Shielded pool validity predicate (VP) -The shielded value pool can be an Anoma "established account" with a +The shielded value pool can be a Namada established account with a validity predicate which handles the verification of shielded transactions. Similarly to zcash, the asset balance of the shielded pool itself is transparent - that is, from the transparent perspective, the MASP is just an account holding assets. The shielded pool VP has the following functions: -- Accept only valid transactions involving assets moving in or out of the pool. -- Accept valid shielded-to-shielded transactions, which don't move - assets from the perspective of transparent Anoma. -- Publish the note commitment and nullifier reveal Merkle trees. +- Accepts only valid transactions involving assets moving in or out of the pool. +- Accepts valid shielded-to-shielded transactions, which don't move + assets from the perspective of transparent Namada. +- Publishes the note commitment and nullifier reveal Merkle trees. To make this possible, the host environment needs to provide verification primitives to VPs. One possibility is to provide a single -high-level "verify transaction output descriptions and proofs" -operation, but another is to provide cryptographic functions in the host +high-level operation to verify transaction output descriptions and proofs, but another is to provide cryptographic functions in the host environment and implement the verifier as part of the VP. -The shielded pool needs the ability to update the commitment and -nullifier Merkle trees as it receives transactions. This may possibly be -achievable via the temporary storage mechanism added for IBC, with the +In future, the shielded pool will be able to update the commitment and +nullifier Merkle trees as it receives transactions. This could likely be +achieved via the temporary storage mechanism added for IBC, with the trees finalized with each block. The input to the VP is the following set of state changes: @@ -37,7 +36,7 @@ The input to the VP is the following set of state changes: - updates to the shielded pool's asset balances - new encrypted notes - updated note and nullifier tree states (partial, because we only have - the last block's anchor?) + the last block's anchor) and the following data which is ancillary from the ledger's perspective: @@ -71,7 +70,7 @@ struct OutputDescription { c_enc: [u8; ENC_CIPHERTEXT_SIZE], // Encrypted note key recovery ciphertext c_out: [u8; OUT_CIPHERTEXT_SIZE], - // Zero-knowledge proof of the new encrypted note's location (?) + // Zero-knowledge proof of the new encrypted note's location zkproof: Proof, } ``` @@ -79,7 +78,7 @@ struct OutputDescription { Given these inputs: The VP must verify the proofs for all spend and output descriptions -(`bellman::groth16`), as well as the signature for spend notes. +([`bellman::groth16`](https://docs.rs/bellman/latest/bellman/groth16/index.html)), as well as the signature for spend notes.
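As an illustration of the proof check the VP performs per spend and output description, here is a minimal Groth16 verification sketch with the `bellman` and `bls12_381` crates (the pairing used by Sapling-style circuits). The verifying key, proof and public inputs are placeholders; the real VP verifies the MASP circuits with their specific public inputs, and the exact `bellman` API varies slightly between versions.

```rust
use bellman::groth16::{prepare_verifying_key, verify_proof, Proof, VerifyingKey};
use bls12_381::{Bls12, Scalar};

/// Sketch of a single description's proof check. `vk` and
/// `public_inputs` stand in for the circuit's actual verifying key
/// and public inputs; any verification error counts as failure.
fn check_description_proof(
    vk: &VerifyingKey<Bls12>,
    proof: &Proof<Bls12>,
    public_inputs: &[Scalar],
) -> bool {
    // Precompute pairing data for the verifying key, then verify.
    let pvk = prepare_verifying_key(vk);
    verify_proof(&pvk, proof, public_inputs).is_ok()
}
```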
Encrypted notes from output descriptions must be published in the storage so that holders of the viewing key can view them; however, the @@ -89,15 +88,14 @@ Nullifiers and commitments must be appended to their respective Merkle trees in the VP's storage as well, which is a transaction-level rather than a block-level state update. -Additionally to the individual spend and output description +In addition to the individual spend and output description verifications, the final transparent asset value change described in the -transaction must equal the pool asset value change, and as an additional -sanity check, the pool's balance of any asset may not end up negative -(this may already be impossible?). (needs more input) +transaction must equal the pool asset value change. As an additional +sanity check, the pool's balance of any asset may not end up negative. NB: Shielded-to-shielded transactions in an asset do not, from the ledger's perspective, transact in that asset; therefore, the asset's own -VP is not run as described above, and cannot be because the shielded +VP cannot run as described above because the shielded pool is asset-hiding. ## Client capabilities @@ -116,7 +114,7 @@ if any, and proof data computed offline by the client. The client and wallet must be extended to support the shielded pool and the cryptographic operations needed to interact with it. From the -perspective of the transparent Anoma protocol, a shielded transaction is +perspective of the transparent Namada protocol, a shielded transaction is just a data write to the MASP storage, unless it moves value in or out of the pool. The client needs the capability to create notes, transactions, and proofs of transactions, but it has the advantage of @@ -148,7 +146,7 @@ For cryptographic details and further information, see Note that this structure is required only by the client; the VP only handles commitments to this data. -Diversifiers are selected (randomly?) by the client and used to +Diversifiers are selected by the client and used to diversify addresses and their associated keys. `v` and `t` identify the asset type and value. Asset identifiers are derived from asset names, which are arbitrary strings (in this case, token/other asset VP @@ -217,7 +215,7 @@ struct TxOut { ``` Note that in contrast to Sapling's UTXO based approach, our transparent inputs/outputs are based on the account model used -in the rest of Anoma. +in the rest of Namada. # Shielded Transfer Specification ## Transfer Format @@ -242,17 +240,17 @@ pub struct Transfer { ``` ## Conditions Below, the conditions necessary for a valid shielded or unshielded transfer are outlined: -* A shielded component equal to `None` indicates a transparent Anoma transaction -* Otherwise the shielded component must have the form `Some(x)` where `x` has the transaction encoding specified in the [Multi-Asset Shielded Pool Specication](https://raw.githubusercontent.com/anoma/masp/main/docs/multi-asset-shielded-pool.pdf) +* A shielded component equal to `None` indicates a transparent Namada transaction +* Otherwise the shielded component must have the form `Some(x)` where `x` has the transaction encoding specified in the [Multi-Asset Shielded Pool Specification](https://github.com/anoma/masp/blob/main/docs/multi-asset-shielded-pool.pdf) * Hence for a shielded transaction to be valid: - * the `Transfer` must satisfy the usual conditions for Anoma ledger transfers (i.e. sufficient funds, ...)
as enforced by token and account validity predicates - * the `Transaction` must satisfy the conditions specified in the [Multi-Asset Shielded Pool Specication](https://raw.githubusercontent.com/anoma/masp/main/docs/multi-asset-shielded-pool.pdf) - * the `Transaction` and `Transfer` together must additionaly satisfy the below boundary conditions intended to ensure consistency between the MASP validity predicate ledger and Anoma ledger + * the `Transfer` must satisfy the usual conditions for Namada ledger transfers (i.e. sufficient funds, ...) as enforced by token and account validity predicates + * the `Transaction` must satisfy the conditions specified in the [Multi-Asset Shielded Pool Specification](https://github.com/anoma/masp/blob/main/docs/multi-asset-shielded-pool.pdf) + * the `Transaction` and `Transfer` together must additionally satisfy the below boundary conditions intended to ensure consistency between the MASP validity predicate ledger and Namada ledger * A key equal to `None` indicates an unpinned shielded transaction; one that can only be found by scanning and trial-decrypting the entire shielded pool * Otherwise the key must have the form `Some(x)` where `x` is a `String` such that there exists no prior accepted transaction with the same key ### Boundary Conditions -Below, the conditions necessary to maintain consistency between the MASP validity predicate ledger and Anoma ledger are outlined: +Below, the conditions necessary to maintain consistency between the MASP validity predicate ledger and Namada ledger are outlined: * If the target address is the MASP validity predicate, then no transparent outputs are permitted in the shielded transaction * If the target address is not the MASP validity predicate, then: * there must be exactly one transparent output in the shielded transaction and: @@ -287,7 +285,7 @@ Below are miscellaneous remarks on the capabilities and limitations of the curre * This key must not be reused; this is to avoid revealing that multiple transactions are going to the same entity ## Multi-Asset Shielded Pool Specification Differences from Zcash Protocol Specification -The [Multi-Asset Shielded Pool Specication](https://media.githubusercontent.com/media/anoma/masp/main/docs/multi-asset-shielded-pool.pdf) referenced above is in turn an extension to the [Zcash Protocol Specification](https://zips.z.cash/protocol/protocol.pdf). Below, the changes from the Zcash Protocol Specification assumed to have been integrated into the Multi-Asset Shielded Pool Specification are listed: +The [Multi-Asset Shielded Pool Specification](https://github.com/anoma/masp/blob/main/docs/multi-asset-shielded-pool.pdf) referenced above is in turn an extension to the [Zcash Protocol Specification](https://zips.z.cash/protocol/protocol.pdf).
Below, the changes from the Zcash Protocol Specification assumed to have been integrated into the Multi-Asset Shielded Pool Specification are listed: * [3.2 Notes](https://zips.z.cash/protocol/protocol.pdf#notes) * Sapling note tuple must include asset type * Note commitment must be parameterized by asset type @@ -391,7 +389,7 @@ Below, the changes from [ZIP 32: Shielded Hierarchical Deterministic Wallets](ht * For extended full viewing keys on the Testnet, the Human-Readable Part is "xfvktest" # Storage Interface Specification -Anoma nodes provide interfaces that allow Anoma clients to query for specific pinned transactions, transactions accepted into the shielded pool, and allowed conversions between various asset types. Below we describe the ABCI paths and the encodings of the responses to each type of query. +Namada nodes provide interfaces that allow Namada clients to query for specific pinned transactions, transactions accepted into the shielded pool, and allowed conversions between various asset types. Below we describe the ABCI paths and the encodings of the responses to each type of query. ## Shielded Transfer Query In order to determine shielded balances belonging to particular keys, or to spend one's balance, it is necessary to download the transactions that transferred the assets to you. To this end, the nth transaction in the shielded pool can be obtained by getting the value at the storage path `/tx-`. Note that indexing is 0-based. This will return a quadruple of the type below: @@ -414,7 +412,7 @@ When scanning the shielded pool, it is sometimes useful know when to stop scanni ## Pinned Transfer Query A transaction pinned to the key `x` in the shielded pool can be obtained indirectly by getting the value at the storage path `/pin-`. This will return the index of the desired transaction within the shielded pool encoded as a `u64`. At this point, the above shielded transaction query can then be used to obtain the actual transaction bytes. ## Conversion Query -In order for MASP clients to convert older asset types to their latest variants, they need to query nodes for currently valid conversions. This can be done by querying the ABCI path `conv/` where `asset-type` is a hexadecimal encoding of the asset identifier as defined in [Multi-Asset Shielded Pool Specication](https://media.githubusercontent.com/media/anoma/masp/main/docs/multi-asset-shielded-pool.pdf). This will return a quadruple of the type below: +In order for MASP clients to convert older asset types to their latest variants, they need to query nodes for currently valid conversions. This can be done by querying the ABCI path `conv/` where `asset-type` is a hexadecimal encoding of the asset identifier as defined in the [Multi-Asset Shielded Pool Specification](https://github.com/anoma/masp/blob/main/docs/multi-asset-shielded-pool.pdf). This will return a quadruple of the type below: ``` ( /// the token address of this asset type @@ -427,4 +425,4 @@ In order for MASP clients to convert older asset types to their latest variants, MerklePath ) ``` -If no conversions are available the amount will be exactly zero, otherwise the amount must contain negative units of the queried asset type. +If no conversions are available, the amount will be exactly zero; otherwise the amount must contain negative units of the queried asset type.
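As a client-side illustration of these ABCI queries, here is a minimal sketch using the `tendermint-rpc` HTTP client, which this release adds to `shared/Cargo.toml` as an optional dependency. It assumes tendermint-rpc 0.23 with the `http-client` feature and a locally reachable node; the exact path strings for a given transaction index or asset type follow the description above and are otherwise assumptions.

```rust
use tendermint::abci::Path;
use tendermint_rpc::{Client, HttpClient};

/// Fetch the raw bytes stored at an ABCI query path, e.g. the path for
/// the nth shielded transaction or for a conversion of a hex-encoded
/// asset type.
async fn query_storage_bytes(
    node_url: &str,
    path: &str,
) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let client = HttpClient::new(node_url)?;
    let abci_path: Path = path.parse()?;
    // No request data, latest height, no proof.
    let response = client
        .abci_query(Some(abci_path), vec![], None, false)
        .await?;
    Ok(response.value)
}
```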
diff --git a/documentation/specs/src/masp/trusted-setup.md index acc14f723da..328a7fe2314 100644 --- a/documentation/specs/src/masp/trusted-setup.md +++ b/documentation/specs/src/masp/trusted-setup.md @@ -1,8 +1,7 @@ # Namada Trusted Setup This spec assumes that you have some previous knowledge about Trusted Setup Ceremonies. If not, you might want to check the following two articles: [Setup Ceremonies - ZKProof](https://zkproof.org/2021/06/30/setup-ceremonies/) and [Parameter Generation - Zcash](https://z.cash/technology/paramgen/). -The Namada Trusted Setup (TS) consists of running the phase 2 of the MPC which is a circuit-specific step to construct the multi-asset shielded pool circuit. Our phase 2 takes as input the Powers of Tau (phase 1) ran by Zcash that can be found [here](https://download.z.cash/downloads/powersoftau/). - +The Namada Trusted Setup (TS) consists of running phase 2 of the MPC, which is a circuit-specific step to construct the multi-asset shielded pool circuit. Our phase 2 takes as input the Powers of Tau (phase 1) run by Zcash, which can be found [here](https://download.z.cash/downloads/powersoftau/). You can sign up for the Namada Trusted Setup [here](https://namada.net/trusted-setup.html). ## Contribution flow diff --git a/documentation/specs/src/user-interfaces/external-integrations.md index f6fd4855b61..c459d52499b 100644 --- a/documentation/specs/src/user-interfaces/external-integrations.md +++ b/documentation/specs/src/user-interfaces/external-integrations.md @@ -10,7 +10,7 @@ * Rosetta integration * Datahub integration * WalletConnect integration -* Ledger integration +* [Ledger integration](../masp/ledger-integration.md) * External integrations * Figment * P2P diff --git a/documentation/specs/src/user-interfaces/web-explorer-interface.md index 9c6225d6745..c7dbfa3c80b 100644 --- a/documentation/specs/src/user-interfaces/web-explorer-interface.md +++ b/documentation/specs/src/user-interfaces/web-explorer-interface.md @@ -1,9 +1,9 @@ # Web explorer interface * Block explorer - * Display PoS state - * Display governance state - * Display transparent transfers - * Display transfers in and out of the MASP - * Display total values for the MASP + * Displays PoS state + * Displays governance state + * Displays transparent transfers + * Displays transfers in and out of the MASP + * Displays total values for the MASP * Allows tx hashes of shielded transfers to be looked up for confirmation diff --git a/documentation/specs/src/user-interfaces/web-wallet-interface.md index 615a74178ff..d34628ef6af 100644 --- a/documentation/specs/src/user-interfaces/web-wallet-interface.md +++ b/documentation/specs/src/user-interfaces/web-wallet-interface.md @@ -2,7 +2,7 @@ ## Application Features -The application consist of the an UI that allows the user to perform the following actions in an easy to use and consistent web application: +The application consists of a UI that allows the user to perform the following actions in an easy-to-use and consistent web application: ### Seed Phrase [hifi Designs](https://www.figma.com/file/aiWZpaXjPLW6fDjE7dpFaU/Projects-2021?node-id=4610%3A5890) @@ -16,9 +16,9 @@ The application consist of the an UI that allows the user to perform the followi * When entering the app, the user is prompted for a
password to decrypt the key pair to be able to perform actions [wireframe](https://www.figma.com/file/aiWZpaXjPLW6fDjE7dpFaU/Projects-2021?node-id=6442%3A5801) * Can create accounts derived from the master key pair * Can delete accounts -* User can integrated with Ledger hardware wallet - * Set up flow TBD - * Managing TBD +* User can integrate with Ledger hardware wallet + * Set up + * Management * Can see an overview of the assets in the account and all derived accounts [wireframe](https://www.figma.com/file/aiWZpaXjPLW6fDjE7dpFaU/Projects-2021?node-id=6442%3A5492) * Can see the details of a single asset, containing the following information [wireframe](https://www.figma.com/file/aiWZpaXjPLW6fDjE7dpFaU/Projects-2021?node-id=6442%3A5681) * Balance @@ -26,7 +26,6 @@ The application consist of the an UI that allows the user to perform the followi * Button to initiate a new transfer using this asset ### Transfers -[TBD]() * Can create transparent transfers * Can create shielded transfers * Bi-directional transfer between Namada and ETH @@ -35,7 +34,6 @@ The application consist of the an UI that allows the user to perform the followi * Supports approving transactions with Keplr ### Staking & Governance -[TBD]() * Can bond funds to a list of validators * Can un-bond funds to a list of validators * Can submit proposals diff --git a/documentation/specs/src/user-interfaces/web-wallet.md b/documentation/specs/src/user-interfaces/web-wallet.md index 370a0a53977..2196400f2f4 100644 --- a/documentation/specs/src/user-interfaces/web-wallet.md +++ b/documentation/specs/src/user-interfaces/web-wallet.md @@ -142,7 +142,7 @@ User can: [Wireframe 2](https://www.figma.com/file/aiWZpaXjPLW6fDjE7dpFaU/Projects-2021?node-id=6496%3A14405) User can: view 1: -* enter the details (TBD) of the proposal +* enter the details of the proposal * see a summary of the proposal * submit the proposal diff --git a/encoding_spec/Cargo.toml b/encoding_spec/Cargo.toml index e6ca933d05d..a62dbd2e40e 100644 --- a/encoding_spec/Cargo.toml +++ b/encoding_spec/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_encoding_spec" readme = "../README.md" resolver = "2" -version = "0.8.1" +version = "0.9.0" [features] default = [] diff --git a/genesis/dev.toml b/genesis/dev.toml index 29a4b4d7917..fc95244e143 100644 --- a/genesis/dev.toml +++ b/genesis/dev.toml @@ -26,10 +26,10 @@ net_address = "127.0.0.1:26656" # Some tokens present at genesis. -[token.xan] +[token.nam] address = "atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5" vp = "vp_token" -[token.xan.balances] +[token.nam.balances] # In token balances, we can use: # 1. An address any account a1qyqzsqqqqqcyvvf5xcu5vd6rg4z5233hg9pn23pjgdryzdjy8pz52wzxxscnvvjxx3rryvzz8y5p6mtz = 1000000 @@ -168,7 +168,7 @@ light_client_attack_slash_rate = 500 # Governance parameters. [gov_params] -# minimum amount of xan token to lock +# minimum amount of nam token to lock min_proposal_fund = 500 # proposal code size in bytes max_proposal_code_size = 300000 diff --git a/genesis/e2e-tests-single-node.toml b/genesis/e2e-tests-single-node.toml index 0e3a6d3fc86..95e51173c64 100644 --- a/genesis/e2e-tests-single-node.toml +++ b/genesis/e2e-tests-single-node.toml @@ -5,7 +5,7 @@ genesis_time = "2021-09-30T10:00:00Z" [validator.validator-0] -# Validator's staked XAN at genesis. +# Validator's staked NAM at genesis. tokens = 200000 # Amount of the validator's genesis token balance which is not staked. 
non_staked_balance = 1000000000000 @@ -20,10 +20,10 @@ net_address = "127.0.0.1:27656" # Some tokens present at genesis. -[token.XAN] +[token.NAM] address = "atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5" vp = "vp_token" -[token.XAN.balances] +[token.NAM.balances] Albert = 1000000 "Albert.public_key" = 100 Bertha = 1000000 @@ -168,7 +168,7 @@ light_client_attack_slash_rate = 500 # Governance parameters. [gov_params] -# minimum amount of xan token to lock +# minimum amount of nam token to lock min_proposal_fund = 500 # proposal code size in bytes max_proposal_code_size = 300000 diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 00614193118..00217200102 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_macros" resolver = "2" -version = "0.8.1" +version = "0.9.0" [lib] proc-macro = true diff --git a/proof_of_stake/Cargo.toml b/proof_of_stake/Cargo.toml index 82522bc3d63..d6ee686121b 100644 --- a/proof_of_stake/Cargo.toml +++ b/proof_of_stake/Cargo.toml @@ -6,7 +6,7 @@ license = "GPL-3.0" name = "namada_proof_of_stake" readme = "../README.md" resolver = "2" -version = "0.8.1" +version = "0.9.0" [features] default = [] diff --git a/proof_of_stake/src/lib.rs b/proof_of_stake/src/lib.rs index d23450b74a7..c2fd2523ccf 100644 --- a/proof_of_stake/src/lib.rs +++ b/proof_of_stake/src/lib.rs @@ -102,7 +102,7 @@ pub trait PosReadOnly { const POS_ADDRESS: Self::Address; /// Address of the staking token - /// TODO: this should be `const`, but in the ledger `address::xan` is not a + /// TODO: this should be `const`, but in the ledger `address::nam` is not a /// `const fn` fn staking_token_address() -> Self::Address; @@ -613,7 +613,7 @@ pub trait PosBase { /// Address of the PoS account const POS_ADDRESS: Self::Address; /// Address of the staking token - /// TODO: this should be `const`, but in the ledger `address::xan` is not a + /// TODO: this should be `const`, but in the ledger `address::nam` is not a /// `const fn` fn staking_token_address() -> Self::Address; /// Address of the slash pool, into which slashed tokens are transferred. 
diff --git a/scripts/release.sh index b32ff707647..71f99dd22a9 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -1,4 +1,5 @@ #!/bin/sh +# depends on cargo-release 0.21.4, git 2.24.0 or later, unclog 0.5.0 set -e if [ -z "$1" ]; then diff --git a/shared/Cargo.toml index 9b5c65414e6..c7be0fe6081 100644 --- a/shared/Cargo.toml +++ b/shared/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada" resolver = "2" -version = "0.8.1" +version = "0.9.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -27,6 +27,7 @@ ibc-mocks-abcipp = [ ] # for integration tests and test utilities testing = [ + "async-client", "proptest", "rand", "rand_core", @@ -37,6 +38,7 @@ wasm-runtime = [ "loupe", "parity-wasm", "pwasm-utils", + "rayon", "wasmer-cache", "wasmer-compiler-singlepass", "wasmer-engine-dylib", @@ -49,6 +51,15 @@ secp256k1-sign-verify = [ "libsecp256k1/hmac", ] +# Enable query support for an async client +async-client = [ + "async-trait", +] +# tendermint-rpc support +tendermint-rpc = [ + "async-client", + "dep:tendermint-rpc", +] abcipp = [ "ibc-proto-abcipp", @@ -72,6 +83,7 @@ ark-serialize = "0.3" # We switch off "blake2b" because it cannot be compiled to wasm # branch = "bat/arse-merkle-tree" arse-merkle-tree = {package = "sparse-merkle-tree", git = "https://github.com/heliaxdev/sparse-merkle-tree", rev = "04ad1eeb28901b57a7599bbe433b3822965dabe8", default-features = false, features = ["std", "borsh"]} +async-trait = {version = "0.1.51", optional = true} bech32 = "0.8.0" borsh = "0.9.0" chrono = {version = "0.4.22", default-features = false, features = ["clock", "std"]} @@ -97,6 +109,7 @@ itertools = "0.10.0" loupe = {version = "0.1.3", optional = true} libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9", default-features = false, features = ["std", "static-context"]} parity-wasm = {version = "0.42.2", optional = true} +paste = "1.0.9" # A fork with state machine testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm", optional = true} prost = "0.9.0" @@ -105,6 +118,7 @@ pwasm-utils = {version = "0.18.0", optional = true} rand = {version = "0.8", optional = true} # TODO proptest re-exports the RngCore trait but the re-implementations only work for version `0.8`.
*sigh* rand_core = {version = "0.6", optional = true} +rayon = {version = "=1.5.3", optional = true} rust_decimal = "1.14.3" serde = {version = "1.0.125", features = ["derive"]} serde_json = "1.0.62" @@ -113,8 +127,10 @@ sha2 = "0.9.3" tempfile = {version = "3.2.0", optional = true} # temporarily using fork work-around for https://github.com/informalsystems/tendermint-rs/issues/971 tendermint-abcipp = {package = "tendermint", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} +tendermint-rpc-abcipp = {package = "tendermint-rpc", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", features = ["http-client"], optional = true} tendermint-proto-abcipp = {package = "tendermint-proto", git = "https://github.com/heliaxdev/tendermint-rs", rev = "95c52476bc37927218374f94ac8e2a19bd35bec9", optional = true} tendermint = {version = "0.23.6", optional = true} +tendermint-rpc = {version = "0.23.6", features = ["http-client"], optional = true} tendermint-proto = {version = "0.23.6", optional = true} thiserror = "1.0.30" tiny-keccak = {version = "2.0.2", features = ["keccak"]} @@ -130,12 +146,14 @@ zeroize = "1.5.5" [dev-dependencies] assert_matches = "1.5.0" +async-trait = {version = "0.1.51"} byte-unit = "4.0.13" libsecp256k1 = {git = "https://github.com/heliaxdev/libsecp256k1", rev = "bbb3bd44a49db361f21d9db80f9a087c194c0ae9"} pretty_assertions = "0.7.2" # A fork with state machine testing proptest = {git = "https://github.com/heliaxdev/proptest", branch = "tomas/sm"} test-log = {version = "0.2.7", default-features = false, features = ["trace"]} +tokio = {version = "1.8.2", default-features = false, features = ["rt", "macros"]} tracing-subscriber = {version = "0.3.7", default-features = false, features = ["env-filter", "fmt"]} [build-dependencies] diff --git a/shared/src/ledger/eth_bridge/bridge_pool_vp.rs b/shared/src/ledger/eth_bridge/bridge_pool_vp.rs index a765a423af4..3a3debc1622 100644 --- a/shared/src/ledger/eth_bridge/bridge_pool_vp.rs +++ b/shared/src/ledger/eth_bridge/bridge_pool_vp.rs @@ -21,7 +21,7 @@ use crate::ledger::native_vp::{Ctx, NativeVp, StorageReader}; use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::{DBIter, DB}; use crate::proto::SignedTxData; -use crate::types::address::{xan, Address, InternalAddress}; +use crate::types::address::{nam, Address, InternalAddress}; use crate::types::eth_bridge_pool::PendingTransfer; use crate::types::storage::Key; use crate::types::token::{balance_key, Amount}; @@ -58,7 +58,7 @@ where /// Get the change in the balance of an account /// associated with an address fn account_balance_delta(&self, address: &Address) -> Option { - let account_key = balance_key(&xan(), address); + let account_key = balance_key(&nam(), address); let before: Amount = (&self.ctx) .read_pre_value(&account_key) .unwrap_or_else(|error| { @@ -248,14 +248,14 @@ mod test_bridge_pool_vp { writelog .write(&get_pending_key(&transfer), transfer.try_to_vec().unwrap()) .unwrap(); - let escrow_key = balance_key(&xan(), &BRIDGE_POOL_ADDRESS); + let escrow_key = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); let amount: Amount = ESCROWED_AMOUNT.into(); writelog .write(&escrow_key, amount.try_to_vec().unwrap()) .unwrap(); // setup a user with a balance - let bertha_account_key = balance_key(&xan(), &bertha_address()); + let bertha_account_key = balance_key(&nam(), &bertha_address()); let 
bertha_wealth: Amount = BERTHA_WEALTH.into(); writelog .write(&bertha_account_key, bertha_wealth.try_to_vec().unwrap()) @@ -323,7 +323,7 @@ mod test_bridge_pool_vp { }, }; // change the payers account - let bertha_account_key = balance_key(&xan(), &bertha_address()); + let bertha_account_key = balance_key(&nam(), &bertha_address()); let new_bertha_balance = match payer_delta { SignedAmount::Positive(amount) => { Amount::from(BERTHA_WEALTH) + amount @@ -338,7 +338,7 @@ mod test_bridge_pool_vp { .write(&bertha_account_key, new_bertha_balance) .expect("Test failed"); // change the escrow account - let escrow = balance_key(&xan(), &BRIDGE_POOL_ADDRESS); + let escrow = balance_key(&nam(), &BRIDGE_POOL_ADDRESS); let new_escrow_balance = match escrow_delta { SignedAmount::Positive(amount) => { Amount::from(ESCROWED_AMOUNT) + amount diff --git a/shared/src/ledger/governance/mod.rs b/shared/src/ledger/governance/mod.rs index b565970c76e..981cb909c46 100644 --- a/shared/src/ledger/governance/mod.rs +++ b/shared/src/ledger/governance/mod.rs @@ -18,7 +18,7 @@ use self::storage as gov_storage; use crate::ledger::native_vp::{Ctx, NativeVp}; use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::{self as ledger_storage}; -use crate::types::address::{xan as m1t, Address, InternalAddress}; +use crate::types::address::{nam, Address, InternalAddress}; use crate::types::storage::Key; use crate::types::token as token_storage; use crate::vm::WasmCacheAccess; @@ -236,7 +236,7 @@ where KeyType::COUNTER(vp::validate_counter_key) } else if gov_storage::is_parameter_key(value) { KeyType::PARAMETER(vp::validate_parameter_key) - } else if token_storage::is_balance_key(&m1t(), value).is_some() { + } else if token_storage::is_balance_key(&nam(), value).is_some() { KeyType::BALANCE(vp::validate_balance_key) } else if gov_storage::is_governance_key(value) { KeyType::UNKNOWN_GOVERNANCE(vp::validate_unknown_governance_key) diff --git a/shared/src/ledger/governance/vp.rs b/shared/src/ledger/governance/vp.rs index aab5171ee98..03003551ff4 100644 --- a/shared/src/ledger/governance/vp.rs +++ b/shared/src/ledger/governance/vp.rs @@ -9,7 +9,7 @@ use crate::ledger::native_vp::{self, Ctx}; use crate::ledger::pos::{self as pos_storage, BondId, Bonds}; use crate::ledger::storage::traits::StorageHasher; use crate::ledger::storage::{self as ledger_storage}; -use crate::types::address::{xan as m1t, Address, InternalAddress}; +use crate::types::address::{nam, Address, InternalAddress}; use crate::types::storage::{Epoch, Key}; use crate::types::token; use crate::vm::WasmCacheAccess; @@ -54,7 +54,7 @@ where H: 'static + StorageHasher, CA: 'static + WasmCacheAccess, { - let balance_key = token::balance_key(&m1t(), &ADDRESS); + let balance_key = token::balance_key(&nam(), &ADDRESS); let min_funds_parameter_key = gov_storage::get_min_proposal_fund_key(); let min_funds_parameter: Option = read(ctx, &min_funds_parameter_key, ReadType::PRE).ok(); @@ -165,7 +165,7 @@ where CA: 'static + WasmCacheAccess, { let funds_key = gov_storage::get_funds_key(proposal_id); - let balance_key = token::balance_key(&m1t(), &ADDRESS); + let balance_key = token::balance_key(&nam(), &ADDRESS); let min_funds_parameter_key = gov_storage::get_min_proposal_fund_key(); let min_funds_parameter: Option = read(ctx, &min_funds_parameter_key, ReadType::PRE).ok(); diff --git a/shared/src/ledger/ibc/vp/mod.rs b/shared/src/ledger/ibc/vp/mod.rs index b6ffd32cba3..8a8e86de4b9 100644 --- a/shared/src/ledger/ibc/vp/mod.rs +++ 
b/shared/src/ledger/ibc/vp/mod.rs @@ -1464,7 +1464,7 @@ mod tests { source_port: get_port_id(), source_channel: get_channel_id(), token: Some(Coin { - denom: "XAN".to_string(), + denom: "NAM".to_string(), amount: 100u64.to_string(), }), sender: Signer::new("sender"), @@ -1723,7 +1723,7 @@ mod tests { source_port: get_port_id(), source_channel: get_channel_id(), token: Some(Coin { - denom: "XAN".to_string(), + denom: "NAM".to_string(), amount: 100u64.to_string(), }), sender: Signer::new("sender"), diff --git a/shared/src/ledger/mod.rs b/shared/src/ledger/mod.rs index ef92b1e2d9c..cbe2528b76d 100644 --- a/shared/src/ledger/mod.rs +++ b/shared/src/ledger/mod.rs @@ -7,6 +7,9 @@ pub mod ibc; pub mod native_vp; pub mod parameters; pub mod pos; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +pub mod protocol; +pub mod queries; pub mod slash_fund; pub mod storage; pub mod storage_api; diff --git a/shared/src/ledger/pos/mod.rs b/shared/src/ledger/pos/mod.rs index e3f7a1956e2..e19ed479c6b 100644 --- a/shared/src/ledger/pos/mod.rs +++ b/shared/src/ledger/pos/mod.rs @@ -31,9 +31,9 @@ pub const ADDRESS: Address = Address::Internal(InternalAddress::PoS); pub const SLASH_POOL_ADDRESS: Address = Address::Internal(InternalAddress::PosSlashPool); -/// Address of the staking token (XAN) +/// Address of the staking token (NAM) pub fn staking_token_address() -> Address { - address::xan() + address::nam() } /// Initialize storage in the genesis block. diff --git a/apps/src/lib/node/ledger/protocol/mod.rs b/shared/src/ledger/protocol/mod.rs similarity index 89% rename from apps/src/lib/node/ledger/protocol/mod.rs rename to shared/src/ledger/protocol/mod.rs index e7b85b0f6cf..a5669d1a066 100644 --- a/apps/src/lib/node/ledger/protocol/mod.rs +++ b/shared/src/ledger/protocol/mod.rs @@ -4,36 +4,33 @@ mod transactions; use std::collections::BTreeSet; use std::panic; -use namada::ledger::eth_bridge::bridge_pool_vp::BridgePoolVp; -use namada::ledger::eth_bridge::vp::EthBridge; -use namada::ledger::gas::{self, BlockGasMeter, VpGasMeter}; -use namada::ledger::governance::GovernanceVp; -use namada::ledger::ibc::vp::{Ibc, IbcToken}; -use namada::ledger::native_vp::{self, NativeVp}; -use namada::ledger::parameters::{self, ParametersVp}; -use namada::ledger::pos::{self, PosVP}; -use namada::ledger::slash_fund::SlashFundVp; -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::write_log::WriteLog; -use namada::ledger::storage::{DBIter, Storage, DB}; -use namada::proto::{self, Tx}; -use namada::types::address::{Address, InternalAddress}; -use namada::types::storage; -use namada::types::transaction::protocol::{ProtocolTx, ProtocolTxType}; -use namada::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; -#[cfg(not(feature = "abcipp"))] -use namada::types::vote_extensions::ethereum_events; -use namada::vm::wasm::{TxCache, VpCache}; -use namada::vm::{self, wasm, WasmCacheAccess}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use thiserror::Error; -use crate::node::ledger::shell::Shell; +use crate::ledger::eth_bridge::bridge_pool_vp::BridgePoolVp; +use crate::ledger::eth_bridge::vp::EthBridge; +use crate::ledger::gas::{self, BlockGasMeter, VpGasMeter}; +use crate::ledger::governance::GovernanceVp; +use crate::ledger::ibc::vp::{Ibc, IbcToken}; +use crate::ledger::native_vp::{self, NativeVp}; +use crate::ledger::parameters::{self, ParametersVp}; +use crate::ledger::pos::{self, PosVP}; +use crate::ledger::slash_fund::SlashFundVp; +use 
crate::ledger::storage::write_log::WriteLog; +use crate::ledger::storage::{DBIter, Storage, StorageHasher, DB}; +use crate::proto::{self, Tx}; +use crate::types::address::{Address, InternalAddress}; +use crate::types::storage; +use crate::types::transaction::protocol::{ProtocolTx, ProtocolTxType}; +use crate::types::transaction::{DecryptedTx, TxResult, TxType, VpsResult}; +use crate::vm::wasm::{TxCache, VpCache}; +use crate::vm::{self, wasm, WasmCacheAccess}; +#[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { #[error("Storage error: {0}")] - StorageError(namada::ledger::storage::Error), + StorageError(crate::ledger::storage::Error), #[error("Error decoding a transaction from bytes: {0}")] TxDecodingError(proto::Error), #[error("Transaction runner error: {0}")] @@ -49,7 +46,7 @@ pub enum Error { #[error("The address {0} doesn't exist")] MissingAddress(Address), #[error("IBC native VP: {0}")] - IbcNativeVpError(namada::ledger::ibc::vp::Error), + IbcNativeVpError(crate::ledger::ibc::vp::Error), #[error("PoS native VP: {0}")] PosNativeVpError(pos::vp::Error), #[error("PoS native VP panicked")] @@ -57,20 +54,21 @@ pub enum Error { #[error("Parameters native VP: {0}")] ParametersNativeVpError(parameters::Error), #[error("IBC Token native VP: {0}")] - IbcTokenNativeVpError(namada::ledger::ibc::vp::IbcTokenError), + IbcTokenNativeVpError(crate::ledger::ibc::vp::IbcTokenError), #[error("Governance native VP error: {0}")] - GovernanceNativeVpError(namada::ledger::governance::vp::Error), + GovernanceNativeVpError(crate::ledger::governance::vp::Error), #[error("SlashFund native VP error: {0}")] - SlashFundNativeVpError(namada::ledger::slash_fund::Error), + SlashFundNativeVpError(crate::ledger::slash_fund::Error), #[error("Ethereum bridge native VP error: {0}")] - EthBridgeNativeVpError(namada::ledger::eth_bridge::vp::Error), + EthBridgeNativeVpError(crate::ledger::eth_bridge::vp::Error), #[error("Ethereum bridge pool native VP error: {0}")] - BridgePoolNativeVpError(namada::ledger::eth_bridge::bridge_pool_vp::Error), + BridgePoolNativeVpError(crate::ledger::eth_bridge::bridge_pool_vp::Error), #[error("Access to an internal address {0} is forbidden")] AccessForbidden(InternalAddress), } -pub(crate) struct ShellParams<'a, D, H, CA> +#[allow(missing_docs)] +pub struct ShellParams<'a, D, H, CA> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -83,23 +81,7 @@ where pub tx_wasm_cache: &'a mut TxCache, } -impl<'a, D, H> From<&'a mut Shell> - for ShellParams<'a, D, H, namada::vm::WasmCacheRwAccess> -where - D: 'static + DB + for<'iter> DBIter<'iter> + Sync, - H: 'static + StorageHasher + Sync, -{ - fn from(shell: &'a mut Shell) -> Self { - Self { - block_gas_meter: &mut shell.gas_meter, - write_log: &mut shell.write_log, - storage: &shell.storage, - vp_wasm_cache: &mut shell.vp_wasm_cache, - tx_wasm_cache: &mut shell.tx_wasm_cache, - } - } -} - +/// Result of applying a transaction pub type Result = std::result::Result; /// Dispatch a given transaction to be applied based on its type. Some storage @@ -109,7 +91,7 @@ pub type Result = std::result::Result; /// If the given tx is a successfully decrypted payload apply the necessary /// vps. Otherwise, we include the tx on chain with the gas charge added /// but no further validations. 
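To make the dispatch rule described in the doc comment above concrete, here is a minimal, self-contained sketch of the dispatch-by-transaction-type pattern. The enum variants and return values are illustrative stand-ins only, not the actual namada types or handlers:

```rust
// Hypothetical sketch of dispatching on a transaction type; `TxType` here
// is a simplified stand-in for the real type in `types::transaction`.
#[derive(Debug)]
enum TxType {
    Raw,
    Decrypted,
    Protocol,
}

fn dispatch_tx(tx_type: TxType) -> Result<&'static str, &'static str> {
    match tx_type {
        // A successfully decrypted payload runs the tx and its VPs
        TxType::Decrypted => Ok("applied with VP validation"),
        // Protocol txs (e.g. Ethereum event digests) get dedicated handlers
        TxType::Protocol => Ok("applied protocol tx"),
        // Everything else is included with the gas charge added, but no
        // further validations
        TxType::Raw => Ok("included, gas charged only"),
    }
}

fn main() {
    println!("{:?}", dispatch_tx(TxType::Decrypted));
}
```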
-pub(crate) fn dispatch_tx<'a, D, H, CA>( +pub fn dispatch_tx<'a, D, H, CA>( tx_type: TxType, tx_length: usize, block_gas_meter: &'a mut BlockGasMeter, @@ -217,6 +199,8 @@ where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, { + use crate::types::vote_extensions::ethereum_events; + match tx { ProtocolTxType::EthereumEvents(ethereum_events::VextDigest { events, diff --git a/apps/src/lib/node/ledger/protocol/transactions/ethereum_events/eth_msgs.rs b/shared/src/ledger/protocol/transactions/ethereum_events/eth_msgs.rs similarity index 85% rename from apps/src/lib/node/ledger/protocol/transactions/ethereum_events/eth_msgs.rs rename to shared/src/ledger/protocol/transactions/ethereum_events/eth_msgs.rs index 47e0caf1117..d29685004c6 100644 --- a/apps/src/lib/node/ledger/protocol/transactions/ethereum_events/eth_msgs.rs +++ b/shared/src/ledger/protocol/transactions/ethereum_events/eth_msgs.rs @@ -1,12 +1,12 @@ use std::collections::BTreeSet; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada::types::address::Address; -use namada::types::ethereum_events::EthereumEvent; -use namada::types::storage::BlockHeight; -use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; -use crate::node::ledger::protocol::transactions::votes::Tally; +use crate::ledger::protocol::transactions::votes::Tally; +use crate::types::address::Address; +use crate::types::ethereum_events::EthereumEvent; +use crate::types::storage::BlockHeight; +use crate::types::vote_extensions::ethereum_events::MultiSignedEthEvent; /// Represents an Ethereum event being seen by some validators #[derive( @@ -57,13 +57,12 @@ pub struct EthMsg { mod tests { use std::collections::BTreeSet; - use namada::types::address; - use namada::types::ethereum_events::testing::{ + use super::*; + use crate::types::address; + use crate::types::ethereum_events::testing::{ arbitrary_nonce, arbitrary_single_transfer, }; - use namada::types::storage::BlockHeight; - - use super::*; + use crate::types::storage::BlockHeight; #[test] /// Tests [`From`] for [`EthMsgUpdate`] diff --git a/apps/src/lib/node/ledger/protocol/transactions/ethereum_events/events.rs b/shared/src/ledger/protocol/transactions/ethereum_events/events.rs similarity index 92% rename from apps/src/lib/node/ledger/protocol/transactions/ethereum_events/events.rs rename to shared/src/ledger/protocol/transactions/ethereum_events/events.rs index 2ed1b79cf2a..49cbb5e6af2 100644 --- a/apps/src/lib/node/ledger/protocol/transactions/ethereum_events/events.rs +++ b/shared/src/ledger/protocol/transactions/ethereum_events/events.rs @@ -3,13 +3,13 @@ use std::collections::BTreeSet; use eyre::Result; -use namada::ledger::eth_bridge::storage::wrapped_erc20s; -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::{DBIter, Storage, DB}; -use namada::types::ethereum_events::{EthereumEvent, TransferToNamada}; -use namada::types::storage::Key; -use crate::node::ledger::protocol::transactions::update; +use crate::ledger::eth_bridge::storage::wrapped_erc20s; +use crate::ledger::protocol::transactions::update; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, Storage, DB}; +use crate::types::ethereum_events::{EthereumEvent, TransferToNamada}; +use crate::types::storage::Key; /// Updates storage based on the given confirmed `event`. 
For example, for a /// confirmed [`EthereumEvent::TransfersToNamada`], mint the corresponding @@ -90,15 +90,15 @@ mod tests { use assert_matches::assert_matches; use borsh::BorshSerialize; - use namada::ledger::storage::testing::TestStorage; - use namada::types::address; - use namada::types::ethereum_events::testing::{ + + use super::*; + use crate::ledger::storage::testing::TestStorage; + use crate::types::address; + use crate::types::ethereum_events::testing::{ arbitrary_eth_address, arbitrary_keccak_hash, arbitrary_nonce, DAI_ERC20_ETH_ADDRESS, }; - use namada::types::token::Amount; - - use super::*; + use crate::types::token::Amount; #[test] /// Test that we do not make any changes to storage when acting on most diff --git a/apps/src/lib/node/ledger/protocol/transactions/ethereum_events/mod.rs b/shared/src/ledger/protocol/transactions/ethereum_events/mod.rs similarity index 92% rename from apps/src/lib/node/ledger/protocol/transactions/ethereum_events/mod.rs rename to shared/src/ledger/protocol/transactions/ethereum_events/mod.rs index f6038589b37..bd6361832b9 100644 --- a/apps/src/lib/node/ledger/protocol/transactions/ethereum_events/mod.rs +++ b/shared/src/ledger/protocol/transactions/ethereum_events/mod.rs @@ -8,21 +8,21 @@ use std::collections::{BTreeSet, HashMap, HashSet}; use eth_msgs::{EthMsg, EthMsgUpdate}; use eyre::Result; -use namada::ledger::eth_bridge::storage::vote_tallies; -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::{DBIter, Storage, DB}; -use namada::types::address::Address; -use namada::types::storage::{self, BlockHeight}; -use namada::types::transaction::TxResult; -use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; -use namada::types::voting_power::FractionalVotingPower; - -use crate::node::ledger::protocol::transactions::utils::{ + +use crate::ledger::eth_bridge::storage::vote_tallies; +use crate::ledger::protocol::transactions::utils::{ self, get_active_validators, }; -use crate::node::ledger::protocol::transactions::votes::{ +use crate::ledger::protocol::transactions::votes::{ calculate_new, calculate_updated, write, }; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, Storage, DB}; +use crate::types::address::Address; +use crate::types::storage::{self, BlockHeight}; +use crate::types::transaction::TxResult; +use crate::types::vote_extensions::ethereum_events::MultiSignedEthEvent; +use crate::types::voting_power::FractionalVotingPower; /// The keys changed while applying a protocol transaction type ChangedKeys = BTreeSet; @@ -194,23 +194,23 @@ mod tests { use std::collections::{BTreeSet, HashMap, HashSet}; use borsh::BorshDeserialize; - use namada::ledger::eth_bridge::storage::wrapped_erc20s; - use namada::ledger::pos::namada_proof_of_stake::epoched::Epoched; - use namada::ledger::pos::namada_proof_of_stake::PosBase; - use namada::ledger::pos::types::{ValidatorSet, WeightedValidator}; - use namada::ledger::storage::mockdb::MockDB; - use namada::ledger::storage::testing::TestStorage; - use namada::ledger::storage::traits::Sha256Hasher; - use namada::types::address; - use namada::types::ethereum_events::testing::{ - arbitrary_amount, arbitrary_eth_address, arbitrary_nonce, - DAI_ERC20_ETH_ADDRESS, - }; - use namada::types::ethereum_events::{EthereumEvent, TransferToNamada}; - use namada::types::token::Amount; use storage::BlockHeight; use super::*; + use crate::ledger::eth_bridge::storage::wrapped_erc20s; + use 
crate::ledger::pos::namada_proof_of_stake::epoched::Epoched; + use crate::ledger::pos::namada_proof_of_stake::PosBase; + use crate::ledger::pos::types::{ValidatorSet, WeightedValidator}; + use crate::ledger::storage::mockdb::MockDB; + use crate::ledger::storage::testing::TestStorage; + use crate::ledger::storage::traits::Sha256Hasher; + use crate::types::address; + use crate::types::ethereum_events::testing::{ + arbitrary_amount, arbitrary_eth_address, arbitrary_nonce, + DAI_ERC20_ETH_ADDRESS, + }; + use crate::types::ethereum_events::{EthereumEvent, TransferToNamada}; + use crate::types::token::Amount; #[test] /// Test applying a `TransfersToNamada` batch containing a single transfer diff --git a/apps/src/lib/node/ledger/protocol/transactions/mod.rs b/shared/src/ledger/protocol/transactions/mod.rs similarity index 100% rename from apps/src/lib/node/ledger/protocol/transactions/mod.rs rename to shared/src/ledger/protocol/transactions/mod.rs diff --git a/apps/src/lib/node/ledger/protocol/transactions/read.rs b/shared/src/ledger/protocol/transactions/read.rs similarity index 88% rename from apps/src/lib/node/ledger/protocol/transactions/read.rs rename to shared/src/ledger/protocol/transactions/read.rs index fbbf5e16084..6183b7b23ad 100644 --- a/apps/src/lib/node/ledger/protocol/transactions/read.rs +++ b/shared/src/ledger/protocol/transactions/read.rs @@ -1,10 +1,11 @@ //! Helpers for reading from storage use borsh::BorshDeserialize; use eyre::{eyre, Result}; -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::{DBIter, Storage, DB}; -use namada::types::storage; -use namada::types::token::Amount; + +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, Storage, DB}; +use crate::types::storage; +use crate::types::token::Amount; /// Returns the stored Amount, or 0 if not stored pub(super) fn amount_or_default( @@ -54,11 +55,11 @@ where mod tests { use assert_matches::assert_matches; use borsh::BorshSerialize; - use namada::ledger::storage::testing::TestStorage; - use namada::types::storage; - use namada::types::token::Amount; - use crate::node::ledger::protocol::transactions::read; + use crate::ledger::protocol::transactions::read; + use crate::ledger::storage::testing::TestStorage; + use crate::types::storage; + use crate::types::token::Amount; #[test] fn test_amount_returns_zero_for_uninitialized_storage() { diff --git a/apps/src/lib/node/ledger/protocol/transactions/update.rs b/shared/src/ledger/protocol/transactions/update.rs similarity index 88% rename from apps/src/lib/node/ledger/protocol/transactions/update.rs rename to shared/src/ledger/protocol/transactions/update.rs index a4e900d00d6..d70ed4d3130 100644 --- a/apps/src/lib/node/ledger/protocol/transactions/update.rs +++ b/shared/src/ledger/protocol/transactions/update.rs @@ -1,10 +1,11 @@ //! 
Helpers for writing to storage use borsh::{BorshDeserialize, BorshSerialize}; use eyre::Result; -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::{DBIter, Storage, DB}; -use namada::types::storage; -use namada::types::token::Amount; + +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, Storage, DB}; +use crate::types::storage; +use crate::types::token::Amount; /// Reads the `Amount` from key, applies update then writes it back pub fn amount( @@ -43,8 +44,9 @@ where mod tests { use borsh::{BorshDeserialize, BorshSerialize}; use eyre::{eyre, Result}; - use namada::ledger::storage::testing::TestStorage; - use namada::types::storage; + + use crate::ledger::storage::testing::TestStorage; + use crate::types::storage; #[test] /// Test updating a value diff --git a/apps/src/lib/node/ledger/protocol/transactions/utils.rs b/shared/src/ledger/protocol/transactions/utils.rs similarity index 93% rename from apps/src/lib/node/ledger/protocol/transactions/utils.rs rename to shared/src/ledger/protocol/transactions/utils.rs index 68f6b8a08eb..561237f07aa 100644 --- a/apps/src/lib/node/ledger/protocol/transactions/utils.rs +++ b/shared/src/ledger/protocol/transactions/utils.rs @@ -2,15 +2,15 @@ use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use eyre::eyre; use itertools::Itertools; -use namada::ledger::pos::types::{VotingPower, WeightedValidator}; -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::{DBIter, Storage, DB}; -use namada::types::address::Address; -use namada::types::storage::BlockHeight; -use namada::types::vote_extensions::ethereum_events::MultiSignedEthEvent; -use namada::types::voting_power::FractionalVotingPower; +use namada_proof_of_stake::PosBase; -use crate::node::ledger::shell::queries::QueriesExt; +use crate::ledger::pos::types::{VotingPower, WeightedValidator}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, Storage, DB}; +use crate::types::address::Address; +use crate::types::storage::BlockHeight; +use crate::types::vote_extensions::ethereum_events::MultiSignedEthEvent; +use crate::types::voting_power::FractionalVotingPower; pub(super) fn get_active_validators( storage: &Storage, @@ -22,11 +22,14 @@ where { let mut active_validators = BTreeMap::default(); for height in block_heights.into_iter() { - let epoch = storage.get_epoch(height).expect( + let epoch = storage.block.pred_epochs.get_epoch(height).expect( "The epoch of the last block height should always be known", ); - _ = active_validators - .insert(height, storage.get_active_validators(Some(epoch))); + let validator_set = storage.read_validator_set(); + let validator_set = validator_set + .get(epoch) + .expect("Validators for an epoch should be known"); + _ = active_validators.insert(height, validator_set.active.clone()); } active_validators } @@ -116,12 +119,12 @@ mod tests { use std::collections::HashSet; use assert_matches::assert_matches; - use namada::types::address; - use namada::types::ethereum_events::testing::{ - arbitrary_single_transfer, arbitrary_voting_power, - }; use super::*; + use crate::types::address; + use crate::types::ethereum_events::testing::{ + arbitrary_single_transfer, arbitrary_voting_power, + }; #[test] /// Test getting the voting power for the sole active validator from the set diff --git a/apps/src/lib/node/ledger/protocol/transactions/votes.rs b/shared/src/ledger/protocol/transactions/votes.rs similarity index 91% rename from 
apps/src/lib/node/ledger/protocol/transactions/votes.rs rename to shared/src/ledger/protocol/transactions/votes.rs index d597d9e3b27..071e3141f39 100644 --- a/apps/src/lib/node/ledger/protocol/transactions/votes.rs +++ b/shared/src/ledger/protocol/transactions/votes.rs @@ -5,14 +5,14 @@ use std::collections::{BTreeSet, HashMap}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use eyre::{eyre, Result}; -use namada::ledger::eth_bridge::storage::vote_tallies; -use namada::ledger::storage::traits::StorageHasher; -use namada::ledger::storage::{DBIter, Storage, DB}; -use namada::types::address::Address; -use namada::types::storage::BlockHeight; -use namada::types::voting_power::FractionalVotingPower; -use crate::node::ledger::protocol::transactions::read; +use crate::ledger::eth_bridge::storage::vote_tallies; +use crate::ledger::protocol::transactions::read; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, Storage, DB}; +use crate::types::address::Address; +use crate::types::storage::BlockHeight; +use crate::types::voting_power::FractionalVotingPower; #[derive( Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, BorshSchema, diff --git a/shared/src/ledger/queries/mod.rs b/shared/src/ledger/queries/mod.rs new file mode 100644 index 00000000000..6a4f31bf57c --- /dev/null +++ b/shared/src/ledger/queries/mod.rs @@ -0,0 +1,232 @@ +//! Ledger read-only queries can be handled and dispatched via the [`RPC`] +//! defined via `router!` macro. + +use shell::{Shell, SHELL}; +#[cfg(any(test, feature = "async-client"))] +pub use types::Client; +pub use types::{ + EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, Router, +}; + +use super::storage::traits::StorageHasher; +use super::storage::{DBIter, DB}; +use super::storage_api; + +#[macro_use] +mod router; +mod shell; +mod types; + +// Most commonly expected patterns should be declared first +router! {RPC, + // Shell provides storage read access, block metadata and can dry-run a tx + ( "shell" ) = (sub SHELL), +} + +/// Handle RPC query request in the ledger. On success, returns response with +/// borsh-encoded data. +pub fn handle_path( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + RPC.handle(ctx, request) +} + +// Handler helpers: + +/// For queries that only support latest height, check that the given height is +/// not different from latest height, otherwise return an error. +pub fn require_latest_height( + ctx: &RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result<()> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + if request.height != ctx.storage.last_height { + return Err(storage_api::Error::new_const( + "This query doesn't support arbitrary block heights, only the \ + latest committed block height ('0' can be used as a special \ + value that means the latest block height)", + )); + } + Ok(()) +} + +/// For queries that do not support proofs, check that proof is not requested, +/// otherwise return an error. +pub fn require_no_proof(request: &RequestQuery) -> storage_api::Result<()> { + if request.prove { + return Err(storage_api::Error::new_const( + "This query doesn't support proofs", + )); + } + Ok(()) +} + +/// For queries that don't use request data, require that there are no data +/// attached. 
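These guard helpers are invoked by handlers before any work is done (as the `handle_match!` rules later in this diff show, the `router!` macro inserts the calls automatically for plain handlers). A self-contained sketch of that pattern follows; the types below are simplified stand-ins for `RequestQuery` and the `storage_api` error type, and only the control flow mirrors the helpers in this module:

```rust
// Simplified stand-in for `RequestQuery`; only the fields the guards need.
struct RequestQuery {
    data: Vec<u8>,
    prove: bool,
}

fn require_no_proof(request: &RequestQuery) -> Result<(), String> {
    if request.prove {
        return Err("This query doesn't support proofs".to_owned());
    }
    Ok(())
}

fn require_no_data(request: &RequestQuery) -> Result<(), String> {
    if !request.data.is_empty() {
        return Err("This query doesn't accept request data".to_owned());
    }
    Ok(())
}

// A handler rejects unsupported options up front, then serves the query.
fn epoch_handler(request: &RequestQuery) -> Result<u64, String> {
    require_no_proof(request)?;
    require_no_data(request)?;
    Ok(42) // placeholder for the real storage read
}

fn main() {
    let request = RequestQuery { data: vec![], prove: false };
    assert_eq!(epoch_handler(&request), Ok(42));
}
```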
+pub fn require_no_data(request: &RequestQuery) -> storage_api::Result<()> { + if !request.data.is_empty() { + return Err(storage_api::Error::new_const( + "This query doesn't accept request data", + )); + } + Ok(()) +} + +#[cfg(any(test, feature = "tendermint-rpc"))] +/// Provides [`Client`] implementation for Tendermint RPC client +pub mod tm { + use thiserror::Error; + + use super::*; + use crate::types::storage::BlockHeight; + + #[allow(missing_docs)] + #[derive(Error, Debug)] + pub enum Error { + #[error("{0}")] + Tendermint(#[from] tendermint_rpc::Error), + #[error("Decoding error: {0}")] + Decoding(#[from] std::io::Error), + #[error("Info log: {0}, error code: {1}")] + Query(String, u32), + #[error("Invalid block height: {0} (overflown i64)")] + InvalidHeight(BlockHeight), + } + + #[async_trait::async_trait] + impl Client for tendermint_rpc::HttpClient { + type Error = Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height + .map(|height| { + tendermint::block::Height::try_from(height.0) + .map_err(|_err| Error::InvalidHeight(height)) + }) + .transpose()?; + let response = tendermint_rpc::Client::abci_query( + self, + // TODO open the private Path constructor in tendermint-rpc + Some(std::str::FromStr::from_str(&path).unwrap()), + data, + height, + prove, + ) + .await?; + match response.code { + tendermint::abci::Code::Ok => Ok(EncodedResponseQuery { + data: response.value, + info: response.info, + proof_ops: response.proof.map(Into::into), + }), + tendermint::abci::Code::Err(code) => { + Err(Error::Query(response.info, code)) + } + } + } + } +} + +/// Queries testing helpers +#[cfg(any(test, feature = "testing"))] +mod testing { + use tempfile::TempDir; + + use super::*; + use crate::ledger::storage::testing::TestStorage; + use crate::types::storage::BlockHeight; + use crate::vm::wasm::{self, TxCache, VpCache}; + use crate::vm::WasmCacheRoAccess; + + /// A test client that has direct access to the storage + pub struct TestClient + where + RPC: Router, + { + /// RPC router + pub rpc: RPC, + /// storage + pub storage: TestStorage, + /// VP wasm compilation cache + pub vp_wasm_cache: VpCache, + /// tx wasm compilation cache + pub tx_wasm_cache: TxCache, + /// VP wasm compilation cache directory + pub vp_cache_dir: TempDir, + /// tx wasm compilation cache directory + pub tx_cache_dir: TempDir, + } + + impl TestClient + where + RPC: Router, + { + #[allow(dead_code)] + /// Initialize a test client for the given root RPC router + pub fn new(rpc: RPC) -> Self { + // Initialize the `TestClient` + let storage = TestStorage::default(); + let (vp_wasm_cache, vp_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + let (tx_wasm_cache, tx_cache_dir) = + wasm::compilation_cache::common::testing::cache(); + Self { + rpc, + storage, + vp_wasm_cache: vp_wasm_cache.read_only(), + tx_wasm_cache: tx_wasm_cache.read_only(), + vp_cache_dir, + tx_cache_dir, + } + } + } + + #[async_trait::async_trait] + impl Client for TestClient + where + RPC: Router + Sync, + { + type Error = std::io::Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + let data = data.unwrap_or_default(); + let height = height.unwrap_or_default(); + // Handle a path by invoking the `RPC.handle` directly with the + // borrowed storage + let request = RequestQuery { + data, + path, + height, + prove, + }; + let ctx = RequestCtx { + 
storage: &self.storage, + vp_wasm_cache: self.vp_wasm_cache.clone(), + tx_wasm_cache: self.tx_wasm_cache.clone(), + }; + let response = self.rpc.handle(ctx, &request).unwrap(); + Ok(response) + } + } +} diff --git a/shared/src/ledger/queries/router.rs b/shared/src/ledger/queries/router.rs new file mode 100644 index 00000000000..e4823e5ad7b --- /dev/null +++ b/shared/src/ledger/queries/router.rs @@ -0,0 +1,1090 @@ +//! The main export of this module is the `router!` macro, which can be used to +//! define compile time tree patterns for a router in which the terminal leaves +//! are connected to the given handler functions. +//! +//! Note that for debugging pattern matching issue, you can uncomment +//! all the `println!`s in this module. + +use thiserror::Error; + +/// Router error. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("Found no matching pattern for the given path {0}")] + WrongPath(String), +} + +/// Find the index of a next forward slash after the given `start` index in the +/// path. When there are no more slashes, returns the index after the end of the +/// path. +/// +/// # Panics +/// The given `start` must be < `path.len()`. +pub fn find_next_slash_index(path: &str, start: usize) -> usize { + path[start..] + .find('/') + // Offset by the starting position + .map(|i| start + i) + // If not found, go to the end of path + .unwrap_or(path.len()) +} + +/// Invoke the sub-handler or call the handler function with the matched +/// arguments generated by `try_match_segments`. +macro_rules! handle_match { + // Nested router + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (sub $router:tt), ( $( $matched_args:ident, )* ), + ) => { + // not used anymore - silence the warning + let _ = $end; + // Undo last '/' advance, the next pattern has to start with `/`. + // This cannot underflow because path cannot be empty and must start + // with `/` + $start -= 1; + // Invoke `handle` on the sub router + return $router.internal_handle($ctx, $request, $start) + }; + + // Handler function that uses a request (`with_options`) + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (with_options $handle:tt), ( $( $matched_args:ident, )* ), + ) => { + // check that we're at the end of the path - trailing slash is optional + if !($end == $request.path.len() || + // ignore trailing slashes + $end == $request.path.len() - 1 && &$request.path[$end..] == "/") { + // we're not at the end, no match + println!("Not fully matched"); + break + } + let result = $handle($ctx, $request, $( $matched_args ),* )?; + // The handle must take care of encoding if needed and return `Vec`. + // This is because for `storage_value` the bytes are returned verbatim + // as read from storage. + return Ok(result); + }; + + // Handler function that doesn't use the request, just the path args, if any + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + $handle:tt, ( $( $matched_args:ident, )* ), + ) => { + // check that we're at the end of the path - trailing slash is optional + if !($end == $request.path.len() || + // ignore trailing slashes + $end == $request.path.len() - 1 && &$request.path[$end..] 
== "/") { + // we're not at the end, no match + // println!("Not fully matched"); + break + } + // Check that the request is not sent with unsupported non-default + $crate::ledger::queries::require_latest_height(&$ctx, $request)?; + $crate::ledger::queries::require_no_proof($request)?; + $crate::ledger::queries::require_no_data($request)?; + + // If you get a compile error from here with `expected function, found + // queries::Storage`, you're probably missing the marker `(sub _)` + let data = $handle($ctx, $( $matched_args ),* )?; + // Encode the returned data with borsh + let data = borsh::BorshSerialize::try_to_vec(&data).into_storage_result()?; + return Ok($crate::ledger::queries::EncodedResponseQuery { + data, + info: Default::default(), + proof_ops: None, + }); + }; +} + +/// Using TT muncher pattern on the `$tail` pattern, this macro recursively +/// generates path matching logic that `break`s if some parts are unmatched. +macro_rules! try_match_segments { + // sub-pattern handle - this should only be invoked if the current + // $pattern is already matched + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + { $( $sub_pattern:tt $( -> $_sub_return_ty:path )? = $handle:tt, )* }, + $matched_args:tt, + () + ) => { + // Try to match each sub-patten + $( + // This loop never repeats, it's only used for a breaking + // mechanism when a $pattern is not matched to skip to the + // next one, if any + loop { + #[allow(unused_mut)] + let mut $start = $start; + let mut $end = $end; + // Try to match, parse args and invoke $handle, will + // break the `loop` not matched + try_match_segments!($ctx, $request, $start, $end, + $handle, $matched_args, $sub_pattern + ); + } + )* + }; + + // Terminal tail call, invoked after when all the args in the current + // pattern are matched and the $handle is not sub-pattern + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + () + ) => { + handle_match!($ctx, $request, $start, $end, $handle, ( $( $matched_args, )* ), ); + }; + + // Try to match an untyped argument, declares the expected $arg as &str + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:ident, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg = &$request.path[$start..$end]; + // Advanced index past the matched arg + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Try to match and parse a typed argument like the case below, but with + // the argument optional. + // Declares the expected $arg into type $t, if it can be parsed. + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : opt $arg_ty:ty] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg: Option<$arg_ty> = match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + // Only advance if optional argument is present, otherwise stay + // in the same position for the next match, if any. 
+ + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + + Some(parsed) + }, + Err(_) => + { + // If arg cannot be parsed, ignore it because it's optional + None + } + }; + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? ) ); + }; + + // Special case of the typed argument pattern below. When there are no more + // args in the tail and the handle isn't a sub-router (its handler is + // ident), we try to match the rest of the path till the end. + // + // This is specifically needed for storage methods, which have + // `storage::Key` param that includes path-like slashes. + // + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + $handle:ident, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + ) + ) => { + let $arg: $arg_ty; + $end = $request.path.len(); + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + // println!("Parsed {}", parsed); + $arg = parsed + }, + Err(_) => + { + // println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + // Invoke the terminal pattern + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), () ); + }; + + // One more special case of the typed argument pattern below for a handler + // `with_options`, where we try to match the rest of the path till the end. + // + // This is specifically needed for storage methods, which have + // `storage::Key` param that includes path-like slashes. + // + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, + (with_options $handle:ident), + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + ) + ) => { + let $arg: $arg_ty; + $end = $request.path.len(); + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + println!("Parsed {}", parsed); + $arg = parsed + }, + Err(_) => + { + println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + // Invoke the terminal pattern + try_match_segments!($ctx, $request, $start, $end, (with_options $handle), + ( $( $matched_args, )* $arg, ), () ); + }; + + // Try to match and parse a typed argument, declares the expected $arg into + // type $t, if it can be parsed + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + [$arg:ident : $arg_ty:ty] + $( / $( $tail:tt)/ * )? + ) + ) => { + let $arg: $arg_ty; + match $request.path[$start..$end].parse::<$arg_ty>() { + Ok(parsed) => { + $arg = parsed + }, + Err(_) => + { + // println!("Cannot parse {} from {}", stringify!($arg_ty), &$request.path[$start..$end]); + // If arg cannot be parsed, try to skip to next pattern + break + } + } + $start = $end; + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* $arg, ), ( $( $( $tail )/ * )? 
) ); + }; + + // Try to match an expected string literal + ( + $ctx:ident, $request:ident, $start:ident, $end:ident, $handle:tt, + ( $( $matched_args:ident, )* ), + ( + $expected:literal + $( / $( $tail:tt)/ * )? + ) + ) => { + if &$request.path[$start..$end] == $expected { + // Advanced index past the matched arg + // println!("Matched literal {}", $expected); + $start = $end; + } else { + // println!("{} doesn't match literal {}", &$request.path[$start..$end], $expected); + // Try to skip to next pattern + break; + } + // advance past next '/', if any + if $start + 1 < $request.path.len() { + $start += 1; + } + $end = find_next_slash_index(&$request.path, $start); + try_match_segments!($ctx, $request, $start, $end, $handle, + ( $( $matched_args, )* ), ( $( $( $tail )/ * )? ) ); + }; +} + +/// Generate a function that tries to match the given pattern and `break`s if +/// any of its parts are unmatched. This layer will check that the path starts +/// with `/` and then invoke `try_match_segments` TT muncher that goes through +/// the patterns. +macro_rules! try_match { + ($ctx:ident, $request:ident, $start:ident, $handle:tt, $segments:tt) => { + // check that the initial char is '/' + if $request.path.is_empty() || &$request.path[..1] != "/" { + // println!("Missing initial slash"); + break; + } + // advance past initial '/' + $start += 1; + // Path is too short to match + if $start >= $request.path.len() { + // println!("Path is too short"); + break; + } + let mut end = find_next_slash_index(&$request.path, $start); + try_match_segments!( + $ctx, + $request, + $start, + end, + $handle, + (), + $segments + ); + }; +} + +/// Convert literal pattern into a `&[&'static str]` +// TODO sub router pattern is not yet used +#[allow(unused_macros)] +macro_rules! pattern_to_prefix { + ( ( $( $pattern:literal )/ * ) ) => { + &[$( $pattern ),*] + }; + ( $pattern:tt ) => { + compile_error!("sub-router cannot have non-literal prefix patterns") + }; +} + +/// Turn patterns and their handlers into methods for the router, where each +/// dynamic pattern is turned into a parameter for the method. +macro_rules! pattern_and_handler_to_method { + // Special terminal rule for `storage_value` handle from + // `shared/src/ledger/queries/shell.rs` that returns `Vec` which should + // not be decoded from response.data, but instead return as is + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + (with_options storage_value), + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! { + #[allow(dead_code)] + #[doc = "Get a path to query `storage_value`."] + pub fn storage_value_path(&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request value with optional data (used for e.g. 
\ + `dry_run_tx`), optionally specified height (supported for \ + `storage_value`) and optional proof (supported for \ + `storage_value` and `storage_prefix`) from `storage_value`."] + pub async fn storage_value(&self, client: &CLIENT, + data: Option>, + height: Option<$crate::types::storage::BlockHeight>, + prove: bool, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $crate::ledger::queries::ResponseQuery>, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + println!("IMMA VEC!!!!!!"); + let path = self.storage_value_path( $( $param ),* ); + + let $crate::ledger::queries::ResponseQuery { + data, info, proof_ops + } = client.request(path, data, height, prove).await?; + + Ok($crate::ledger::queries::ResponseQuery { + data, + info, + proof_ops, + }) + } + } + }; + + // terminal rule for $handle that uses request (`with_options`) + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + (with_options $handle:tt), + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! { + #[allow(dead_code)] + #[doc = "Get a path to query `" $handle "`."] + pub fn [<$handle _path>](&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request value with optional data (used for e.g. \ + `dry_run_tx`), optionally specified height (supported for \ + `storage_value`) and optional proof (supported for \ + `storage_value` and `storage_prefix`) from `" $handle "`."] + pub async fn $handle(&self, client: &CLIENT, + data: Option>, + height: Option<$crate::types::storage::BlockHeight>, + prove: bool, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $crate::ledger::queries::ResponseQuery<$return_type>, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + println!("IMMA not a VEC!!!!!!"); + let path = self.[<$handle _path>]( $( $param ),* ); + + let $crate::ledger::queries::ResponseQuery { + data, info, proof_ops + } = client.request(path, data, height, prove).await?; + + let decoded: $return_type = + borsh::BorshDeserialize::try_from_slice(&data[..])?; + + Ok($crate::ledger::queries::ResponseQuery { + data: decoded, + info, + proof_ops, + }) + } + } + }; + + // terminal rule that $handle that doesn't use request + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $return_type:path, + $handle:tt, + () + ) => { + // paste! used to construct the `fn $handle_path`'s name. + paste::paste! 
{ + #[allow(dead_code)] + #[doc = "Get a path to query `" $handle "`."] + pub fn [<$handle _path>](&self, $( $param: &$param_ty ),* ) -> String { + itertools::join( + [ Some(std::borrow::Cow::from(&self.prefix)), $( $prefix ),* ] + .into_iter() + .filter_map(|x| x), "/") + } + + #[allow(dead_code)] + #[allow(clippy::too_many_arguments)] + #[cfg(any(test, feature = "async-client"))] + #[doc = "Request a simple borsh-encoded value from `" $handle "`, \ + without any additional request data, specified block height or \ + proof."] + pub async fn $handle(&self, client: &CLIENT, + $( $param: &$param_ty ),* + ) + -> std::result::Result< + $return_type, + ::Error + > + where CLIENT: $crate::ledger::queries::Client + std::marker::Sync { + let path = self.[<$handle _path>]( $( $param ),* ); + + let data = client.simple_request(path).await?; + + let decoded: $return_type = + borsh::BorshDeserialize::try_from_slice(&data[..])?; + Ok(decoded) + } + } + }; + + // sub-pattern + ( + $param:tt + $prefix:tt + $( $_return_type:path )?, + { $( $sub_pattern:tt $( -> $sub_return_ty:path )? = $handle:tt, )* }, + $pattern:tt + ) => { + $( + // join pattern with each sub-pattern + pattern_and_handler_to_method!( + $param + $prefix + $( $sub_return_ty )?, $handle, $pattern, $sub_pattern + ); + )* + }; + + // literal string arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( $pattern:literal $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty ),* ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($pattern)) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // untyped arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: str ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($name)) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // typed arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt: $type:ty] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: $type ) + [ $( { $prefix }, )* { std::option::Option::Some(std::borrow::Cow::from($name.to_string())) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // opt typed arg + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( [$name:tt: opt $type:ty] $( / $tail:tt )* ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty, )* $name: std::option::Option<$type> ) + [ $( { $prefix }, )* { $name.map(|arg| std::borrow::Cow::from(arg.to_string())) } ] + $( $return_type )?, $handle, ( $( $tail )/ * ) + ); + }; + + // join pattern with sub-pattern + ( + ( $( $param:tt: $param_ty:ty ),* ) + [ $( { $prefix:expr } ),* ] + $( $return_type:path )?, + $handle:tt, + ( $( $pattern:tt )/ * ), ( $( $sub_pattern:tt )/ * ) + ) => { + pattern_and_handler_to_method!( + ( $( $param: $param_ty ),* ) + [ $( { $prefix }, )* ] + $( $return_type )?, + $handle, ( $( $pattern / )* $( $sub_pattern )/ * ) + ); + }; +} + +/// TT muncher macro that generates a `struct $name` with methods for all its +/// handlers. +macro_rules! router_type { + // terminal rule + ($name:ident { $( $methods:item )* }, ) => { + paste::paste! 
{ + #[doc = "`" $name "`path router type"] + pub struct $name { + prefix: String, + } + + impl $name { + #[doc = "Construct this router as a root router"] + pub const fn new() -> Self { + Self { + prefix: String::new(), + } + } + + #[allow(dead_code)] + #[doc = "Construct this router as a sub-router at the given prefix path"] + pub const fn sub(prefix: String) -> Self { + Self { + prefix, + } + } + + // paste the generated methods + $( $methods )* + } + } + }; + + // a sub router - recursion + ( + $name:ident { $( $methods:item )* }, + $pattern:tt = (sub $router:ident) + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + paste::paste! { + router_type!{ + $name { + #[doc = "`" $name "` sub-router"] + pub fn [<$router:camel:snake>](&self) -> [<$router:camel>] { + // prefix for a sub can only contain literals + let current_prefix: &[&'static str] = pattern_to_prefix!($pattern); + let path = [&[self.prefix.as_str()][..], current_prefix].concat().join("/"); + [<$router:camel>]::sub(path) + } + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? = $tail ),* + } + } + }; + + // a sub-pattern - add a method for each handle inside it + ( + $name:ident + { $( $methods:item )* }, + $pattern:tt = { $( $sub_pattern:tt $( -> $sub_return_ty:path )? = $handle:tt, )* } + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + router_type!{ + $name { + $( + // join pattern with each sub-pattern + pattern_and_handler_to_method!( () [] $( $sub_return_ty )?, $handle, + $pattern, $sub_pattern + ); + )* + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? = $tail ),* + } + }; + + // pattern with a handle - add a method for the handle + ( + $name:ident + { $( $methods:item )* }, + $pattern:tt -> $return_type:path = $handle:tt + $( ,$tail_pattern:tt $( -> $tail_return_type:path )? = $tail:tt )* + ) => { + router_type!{ + $name { + pattern_and_handler_to_method!( () [] $return_type, $handle, $pattern ); + $( $methods )* + }, + $( $tail_pattern $( -> $tail_return_type )? = $tail ),* + } + }; +} + +/// Compile time tree patterns router with type-safe dynamic parameter parsing, +/// automatic routing, type-safe path constructors and optional client query +/// methods (enabled with `feature = "async-client"`). +/// +/// The `router!` macro implements greedy matching algorithm. +/// +/// ## Examples +/// +/// ```rust,ignore +/// router! {ROOT, +/// // This pattern matches `/pattern_a/something`, where `something` can be +/// // parsed with `FromStr` into `ArgType`. +/// ( "pattern_a" / [typed_dynamic_arg: ArgType] ) -> ReturnType = handler, +/// +/// ( "pattern_b" / [optional_dynamic_arg: opt ArgType] ) -> ReturnType = +/// handler, +/// +/// // Untyped dynamic arg is a string slice `&str` +/// ( "pattern_c" / [untyped_dynamic_arg] ) -> ReturnType = handler, +/// +/// // The handler additionally receives the `RequestQuery`, which can have +/// // some data attached, specified block height and ask for a proof. It +/// // returns `EncodedResponseQuery` (the `data` must be encoded, if +/// // necessary), which can have some `info` string and a proof. 
+/// ( "pattern_d" ) -> ReturnType = (with_options handler), +/// +/// ( "another" / "pattern" / "that" / "goes" / "deep" ) -> ReturnType = handler, +/// +/// // Inlined sub-tree +/// ( "subtree" / [this_is_fine: ArgType] ) = { +/// ( "a" ) -> u64 = a_handler, +/// ( "b" / [another_arg] ) -> u64 = b_handler, +/// } +/// +/// // Imported sub-router - The prefix can only have literal segments +/// ( "sub" / "no_dynamic_args" ) = (sub SUB_ROUTER), +/// } +/// +/// router! {SUB_ROUTER, +/// ( "pattern" ) -> ReturnType = handler, +/// } +/// ``` +/// +/// Handler functions used in the patterns should have the expected signature: +/// ```rust,ignore +/// fn handler(ctx: RequestCtx<'_, D, H>, args ...) +/// -> storage_api::Result +/// where +/// D: 'static + DB + for<'iter> DBIter<'iter> + Sync, +/// H: 'static + StorageHasher + Sync; +/// ``` +/// +/// If the handler wants to support request options, it can be defined as +/// `(with_options $handler)` and then the expected signature is: +/// ```rust,ignore +/// fn handler(ctx: RequestCtx<'_, D, H>, request: &RequestQuery, args +/// ...) -> storage_api::Result> +/// where +/// D: 'static + DB + for<'iter> DBIter<'iter> + Sync, +/// H: 'static + StorageHasher + Sync; +/// ``` +#[macro_export] +macro_rules! router { + { $name:ident, $( $pattern:tt $( -> $return_type:path )? = $handle:tt , )* } => ( + + // `paste!` is used to convert the $name cases for a derived type and function name + paste::paste! { + + router_type!{[<$name:camel>] {}, $( $pattern $( -> $return_type )? = $handle ),* } + + impl $crate::ledger::queries::Router for [<$name:camel>] { + // TODO: for some patterns, there's unused assignment of `$end` + #[allow(unused_assignments)] + fn internal_handle( + &self, + ctx: $crate::ledger::queries::RequestCtx<'_, D, H>, + request: &$crate::ledger::queries::RequestQuery, + start: usize + ) -> $crate::ledger::storage_api::Result<$crate::ledger::queries::EncodedResponseQuery> + where + D: 'static + $crate::ledger::storage::DB + for<'iter> $crate::ledger::storage::DBIter<'iter> + Sync, + H: 'static + $crate::ledger::storage::StorageHasher + Sync, + { + + // Import for `.into_storage_result()` + use $crate::ledger::storage_api::ResultExt; + + // Import helper from this crate used inside the macros + use $crate::ledger::queries::router::find_next_slash_index; + + $( + // This loop never repeats, it's only used for a breaking + // mechanism when a $pattern is not matched to skip to the + // next one, if any + loop { + let mut start = start; + // Try to match, parse args and invoke $handle, will + // break the `loop` not matched + try_match!(ctx, request, start, $handle, $pattern); + } + )* + + return Err( + $crate::ledger::queries::router::Error::WrongPath(request.path.clone())) + .into_storage_result(); + } + } + + #[doc = "`" $name "` path router"] + pub const $name: [<$name:camel>] = [<$name:camel>]::new(); + } + + ); +} + +/// You can expand the `handlers!` macro invocation with e.g.: +/// ```shell +/// cargo expand ledger::queries::router::test_rpc_handlers --features "ferveo-tpke, ibc-mocks, testing, wasm-runtime, tendermint-rpc" --tests --lib +/// ``` +#[cfg(test)] +mod test_rpc_handlers { + use borsh::BorshSerialize; + + use crate::ledger::queries::{ + EncodedResponseQuery, RequestCtx, RequestQuery, ResponseQuery, + }; + use crate::ledger::storage::{DBIter, StorageHasher, DB}; + use crate::ledger::storage_api::{self, ResultExt}; + use crate::types::storage::Epoch; + use crate::types::token; + + /// A little macro to generate boilerplate 
for RPC handler functions. + /// These are implemented to return their name as a String, joined by + /// slashes with their argument values turned `to_string()`, if any. + macro_rules! handlers { + ( + // name and params, if any + $( $name:ident $( ( $( $param:ident: $param_ty:ty ),* ) )? ),* + // optional trailing comma + $(,)? ) => { + $( + pub fn $name( + _ctx: RequestCtx<'_, D, H>, + $( $( $param: $param_ty ),* )? + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = stringify!($name).to_owned(); + $( $( + let data = format!("{data}/{}", $param); + )* )? + Ok(data) + } + )* + }; + } + + // Generate handler functions for the router below + handlers!( + a, + b0i, + b0ii, + b1, + b2i(balance: token::Amount), + b3(a1: token::Amount, a2: token::Amount, a3: token::Amount), + b3i(a1: token::Amount, a2: token::Amount, a3: token::Amount), + b3ii(a1: token::Amount, a2: token::Amount, a3: token::Amount), + x, + y(untyped_arg: &str), + z(untyped_arg: &str), + ); + + /// This handler is hand-written, because the test helper macro doesn't + /// support optional args. + pub fn b3iii( + _ctx: RequestCtx<'_, D, H>, + a1: token::Amount, + a2: token::Amount, + a3: Option, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "b3iii".to_owned(); + let data = format!("{data}/{}", a1); + let data = format!("{data}/{}", a2); + let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); + Ok(data) + } + + /// This handler is hand-written, because the test helper macro doesn't + /// support optional args. + pub fn b3iiii( + _ctx: RequestCtx<'_, D, H>, + a1: token::Amount, + a2: token::Amount, + a3: Option, + a4: Option, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "b3iiii".to_owned(); + let data = format!("{data}/{}", a1); + let data = format!("{data}/{}", a2); + let data = a3.map(|a3| format!("{data}/{}", a3)).unwrap_or(data); + let data = a4.map(|a4| format!("{data}/{}", a4)).unwrap_or(data); + Ok(data) + } + + /// This handler is hand-written, because the test helper macro doesn't + /// support handlers with `with_options`. + pub fn c( + _ctx: RequestCtx<'_, D, H>, + _request: &RequestQuery, + ) -> storage_api::Result + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + let data = "c".to_owned().try_to_vec().into_storage_result()?; + Ok(ResponseQuery { + data, + ..ResponseQuery::default() + }) + } +} + +/// You can expand the `router!` macro invocation with e.g.: +/// ```shell +/// cargo expand ledger::queries::router::test_rpc --features "ferveo-tpke, ibc-mocks, testing, wasm-runtime, tendermint-rpc" --tests --lib +/// ``` +#[cfg(test)] +mod test_rpc { + use super::test_rpc_handlers::*; + use crate::types::storage::Epoch; + use crate::types::token; + + // Setup an RPC router for testing + router! 
+ +/// You can expand the `router!` macro invocation with e.g.: +/// ```shell +/// cargo expand ledger::queries::router::test_rpc --features "ferveo-tpke, ibc-mocks, testing, wasm-runtime, tendermint-rpc" --tests --lib +/// ``` +#[cfg(test)] +mod test_rpc { + use super::test_rpc_handlers::*; + use crate::types::storage::Epoch; + use crate::types::token; + + // Setup an RPC router for testing + router! {TEST_RPC, + ( "sub" ) = (sub TEST_SUB_RPC), + ( "a" ) -> String = a, + ( "b" ) = { + ( "0" ) = { + ( "i" ) -> String = b0i, + ( "ii" ) -> String = b0ii, + }, + ( "1" ) -> String = b1, + ( "2" ) = { + ( "i" / [balance: token::Amount] ) -> String = b2i, + }, + ( "3" / [a1: token::Amount] / [a2: token::Amount] ) = { + ( "i" / [a3: token::Amount] ) -> String = b3i, + ( [a3: token::Amount] ) -> String = b3, + ( [a3: token::Amount] / "ii" ) -> String = b3ii, + ( [a3: opt token::Amount] / "iii" ) -> String = b3iii, + ( "iiii" / [a3: opt token::Amount] / "xyz" / [a4: opt Epoch] ) -> String = b3iiii, + }, + }, + ( "c" ) -> String = (with_options c), + } + + router! {TEST_SUB_RPC, + ( "x" ) -> String = x, + ( "y" / [untyped_arg] ) -> String = y, + ( "z" / [untyped_arg] ) -> String = z, + } +} + +#[cfg(test)] +mod test { + use super::test_rpc::TEST_RPC; + use crate::ledger::queries::testing::TestClient; + use crate::ledger::queries::{RequestCtx, RequestQuery, Router}; + use crate::ledger::storage_api; + use crate::types::storage::Epoch; + use crate::types::token; + + /// Test all the possible paths in `TEST_RPC` router. + #[tokio::test] + async fn test_router_macro() -> storage_api::Result<()> { + let client = TestClient::new(TEST_RPC); + + // Test request with an invalid path + let request = RequestQuery { + path: "/invalid".to_owned(), + ..RequestQuery::default() + }; + let ctx = RequestCtx { + storage: &client.storage, + vp_wasm_cache: client.vp_wasm_cache.clone(), + tx_wasm_cache: client.tx_wasm_cache.clone(), + }; + let result = TEST_RPC.handle(ctx, &request); + assert!(result.is_err()); + + // Test requests to valid paths using the router's methods + + let result = TEST_RPC.a(&client).await.unwrap(); + assert_eq!(result, "a"); + + let result = TEST_RPC.b0i(&client).await.unwrap(); + assert_eq!(result, "b0i"); + + let result = TEST_RPC.b0ii(&client).await.unwrap(); + assert_eq!(result, "b0ii"); + + let result = TEST_RPC.b1(&client).await.unwrap(); + assert_eq!(result, "b1"); + + let balance = token::Amount::from(123_000_000); + let result = TEST_RPC.b2i(&client, &balance).await.unwrap(); + assert_eq!(result, format!("b2i/{balance}")); + + let a1 = token::Amount::from(345); + let a2 = token::Amount::from(123_000); + let a3 = token::Amount::from(1_000_999); + let result = TEST_RPC.b3(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3i(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3i/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3ii(&client, &a1, &a2, &a3).await.unwrap(); + assert_eq!(result, format!("b3ii/{a1}/{a2}/{a3}")); + + let result = + TEST_RPC.b3iii(&client, &a1, &a2, &Some(a3)).await.unwrap(); + assert_eq!(result, format!("b3iii/{a1}/{a2}/{a3}")); + + let result = TEST_RPC.b3iii(&client, &a1, &a2, &None).await.unwrap(); + assert_eq!(result, format!("b3iii/{a1}/{a2}")); + + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &Some(a3), &None) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}/{a3}")); + + let a4 = Epoch::from(10); + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &Some(a3), &Some(a4)) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}/{a3}/{a4}")); + + let result = TEST_RPC + .b3iiii(&client, &a1, &a2, &None, &None) + .await + .unwrap(); + assert_eq!(result, format!("b3iiii/{a1}/{a2}")); + + let result = TEST_RPC.c(&client, None, None, false).await.unwrap(); + assert_eq!(result.data, format!("c")); + + let result = 
TEST_RPC.test_sub_rpc().x(&client).await.unwrap(); + assert_eq!(result, format!("x")); + + let arg = "test123"; + let result = TEST_RPC.test_sub_rpc().y(&client, arg).await.unwrap(); + assert_eq!(result, format!("y/{arg}")); + + let arg = "test321"; + let result = TEST_RPC.test_sub_rpc().z(&client, arg).await.unwrap(); + assert_eq!(result, format!("z/{arg}")); + + Ok(()) + } +} diff --git a/shared/src/ledger/queries/shell.rs b/shared/src/ledger/queries/shell.rs new file mode 100644 index 00000000000..3952d9fe81c --- /dev/null +++ b/shared/src/ledger/queries/shell.rs @@ -0,0 +1,364 @@ +use borsh::BorshSerialize; +use tendermint_proto::crypto::{ProofOp, ProofOps}; + +use crate::ledger::queries::types::{RequestCtx, RequestQuery}; +use crate::ledger::queries::{require_latest_height, EncodedResponseQuery}; +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, DB}; +use crate::ledger::storage_api::{self, ResultExt, StorageRead}; +use crate::types::hash::Hash; +use crate::types::storage::{self, Epoch, PrefixValue}; +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +use crate::types::transaction::TxResult; + +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +router! {SHELL, + // Epoch of the last committed block + ( "epoch" ) -> Epoch = epoch, + + // Raw storage access - read value + ( "value" / [storage_key: storage::Key] ) + -> Vec<u8> = (with_options storage_value), + + // Dry run a transaction + ( "dry_run_tx" ) -> TxResult = (with_options dry_run_tx), + + // Raw storage access - prefix iterator + ( "prefix" / [storage_key: storage::Key] ) + -> Vec<PrefixValue> = (with_options storage_prefix), + + // Raw storage access - is given storage key present? + ( "has_key" / [storage_key: storage::Key] ) + -> bool = storage_has_key, + + // was the transaction accepted? + ( "accepted" / [tx_hash: Hash] ) -> bool = accepted, + + // was the transaction applied? + ( "applied" / [tx_hash: Hash] ) -> bool = applied, +} + +#[cfg(not(all(feature = "wasm-runtime", feature = "ferveo-tpke")))] +router! {SHELL, + // Epoch of the last committed block + ( "epoch" ) -> Epoch = epoch, + + // Raw storage access - read value + ( "value" / [storage_key: storage::Key] ) + -> Vec<u8> = (with_options storage_value), + + // Raw storage access - prefix iterator + ( "prefix" / [storage_key: storage::Key] ) + -> Vec<PrefixValue> = (with_options storage_prefix), + + // Raw storage access - is given storage key present? + ( "has_key" / [storage_key: storage::Key] ) + -> bool = storage_has_key, + + // was the transaction accepted? + ( "accepted" / [tx_hash: Hash]) -> bool = accepted, + + // was the transaction applied? + ( "applied" / [tx_hash: Hash]) -> bool = applied, +}
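Editor's sketch (not part of the diff): the `SHELL` router is defined twice because `dry_run_tx` is only available with the `wasm-runtime` and `ferveo-tpke` features; both variants serve the same `/shell/...` paths. Mirroring the tests further down in this diff, the generated client methods can be used like this (`client` is any `Client` impl, `key` a `storage::Key`):

```rust
// Grounded in the `test_shell_queries_router_with_client` test below.
let current_epoch = RPC.shell().epoch(&client).await?;
let has_key = RPC.shell().storage_has_key(&client, &key).await?;
// `with_options` endpoints additionally take (data, height, prove):
let value = RPC
    .shell()
    .storage_value(&client, None, None, false, &key)
    .await?;
```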
+ ( "applied" / [tx_hash: Hash]) -> bool = applied, +} + +// Handlers: + +#[cfg(all(feature = "wasm-runtime", feature = "ferveo-tpke"))] +fn dry_run_tx( + mut ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + use crate::ledger::gas::BlockGasMeter; + use crate::ledger::protocol::{self, ShellParams}; + use crate::ledger::storage::write_log::WriteLog; + use crate::proto::Tx; + + let mut gas_meter = BlockGasMeter::default(); + let mut write_log = WriteLog::default(); + let tx = Tx::try_from(&request.data[..]).into_storage_result()?; + let data = protocol::apply_wasm_tx( + tx, + request.data.len(), + ShellParams { + block_gas_meter: &mut gas_meter, + write_log: &mut write_log, + storage: &ctx.storage, + vp_wasm_cache: &mut ctx.vp_wasm_cache, + tx_wasm_cache: &mut ctx.tx_wasm_cache, + }, + ) + .into_storage_result()?; + let data = data.try_to_vec().into_storage_result()?; + Ok(EncodedResponseQuery { + data, + proof_ops: None, + info: Default::default(), + }) +} + +fn epoch(ctx: RequestCtx<'_, D, H>) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let data = ctx.storage.last_epoch; + Ok(data) +} + +/// Returns data with `vec![]` when the storage key is not found. For all +/// borsh-encoded types, it is safe to check `data.is_empty()` to see if the +/// value was found, except for unit - see `fn query_storage_value` in +/// `apps/src/lib/client/rpc.rs` for unit type handling via `storage_has_key`. +fn storage_value( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + match ctx + .storage + .read_with_height(&storage_key, request.height) + .into_storage_result()? 
+ { + (Some(value), _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_existence_proof( + &storage_key, + value.clone().into(), + request.height, + ) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(EncodedResponseQuery { + data: value, + proof_ops: proof, + info: Default::default(), + }) + } + (None, _gas) => { + let proof = if request.prove { + let proof = ctx + .storage + .get_non_existence_proof(&storage_key, request.height) + .into_storage_result()?; + Some(proof.into()) + } else { + None + }; + Ok(EncodedResponseQuery { + data: vec![], + proof_ops: proof, + info: format!("No value found for key: {}", storage_key), + }) + } + } +} + +fn storage_prefix( + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + storage_key: storage::Key, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + require_latest_height(&ctx, request)?; + + let (iter, _gas) = ctx.storage.iter_prefix(&storage_key); + let data: storage_api::Result> = iter + .map(|(key, value, _gas)| { + let key = storage::Key::parse(key).into_storage_result()?; + Ok(PrefixValue { key, value }) + }) + .collect(); + let data = data?; + let proof_ops = if request.prove { + let mut ops = vec![]; + for PrefixValue { key, value } in &data { + let proof = ctx + .storage + .get_existence_proof(key, value.clone().into(), request.height) + .into_storage_result()?; + let mut cur_ops: Vec = + proof.ops.into_iter().map(|op| op.into()).collect(); + ops.append(&mut cur_ops); + } + // ops is not empty in this case + Some(ProofOps { ops }) + } else { + None + }; + let data = data.try_to_vec().into_storage_result()?; + Ok(EncodedResponseQuery { + data, + proof_ops, + ..Default::default() + }) +} + +fn storage_has_key( + ctx: RequestCtx<'_, D, H>, + storage_key: storage::Key, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + let data = StorageRead::has_key(ctx.storage, &storage_key)?; + Ok(data) +} + +fn accepted( + _ctx: RequestCtx<'_, D, H>, + _tx_hash: Hash, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + todo!("pending reimplementation with new query router") +} + +fn applied( + _ctx: RequestCtx<'_, D, H>, + _hash: Hash, +) -> storage_api::Result +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + todo!("pending reimplementation with new query router") +} + +#[cfg(test)] +mod test { + use borsh::BorshDeserialize; + + use crate::ledger::queries::testing::TestClient; + use crate::ledger::queries::RPC; + use crate::ledger::storage_api::{self, StorageWrite}; + use crate::proto::Tx; + use crate::types::{address, token}; + + const TX_NO_OP_WASM: &str = "../wasm_for_tests/tx_no_op.wasm"; + + #[test] + fn test_shell_queries_router_paths() { + let path = RPC.shell().epoch_path(); + assert_eq!("/shell/epoch", path); + + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let key = token::balance_key(&token_addr, &owner); + let path = RPC.shell().storage_value_path(&key); + assert_eq!(format!("/shell/value/{}", key), path); + + let path = RPC.shell().dry_run_tx_path(); + assert_eq!("/shell/dry_run_tx", path); + + let path = RPC.shell().storage_prefix_path(&key); + assert_eq!(format!("/shell/prefix/{}", key), path); + + let path = 
RPC.shell().storage_has_key_path(&key); + assert_eq!(format!("/shell/has_key/{}", key), path); + } + + #[tokio::test] + async fn test_shell_queries_router_with_client() -> storage_api::Result<()> + { + // Initialize the `TestClient` + let mut client = TestClient::new(RPC); + + // Request last committed epoch + let read_epoch = RPC.shell().epoch(&client).await.unwrap(); + let current_epoch = client.storage.last_epoch; + assert_eq!(current_epoch, read_epoch); + + // Request dry run tx + let tx_no_op = std::fs::read(TX_NO_OP_WASM).expect("cannot load wasm"); + let tx = Tx::new(tx_no_op, None); + let tx_bytes = tx.to_bytes(); + let result = RPC + .shell() + .dry_run_tx(&client, Some(tx_bytes), None, false) + .await + .unwrap(); + assert!(result.data.is_accepted()); + + // Request storage value for a balance key ... + let token_addr = address::testing::established_address_1(); + let owner = address::testing::established_address_2(); + let balance_key = token::balance_key(&token_addr, &owner); + // ... there should be no value yet. + let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert!(read_balance.data.is_empty()); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert!(read_balances.data.is_empty()); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(!has_balance_key); + + // Then write some balance ... + let balance = token::Amount::from(1000); + StorageWrite::write(&mut client.storage, &balance_key, balance)?; + // ... there should be the same value now + let read_balance = RPC + .shell() + .storage_value(&client, None, None, false, &balance_key) + .await + .unwrap(); + assert_eq!( + balance, + token::Amount::try_from_slice(&read_balance.data).unwrap() + ); + + // Request storage prefix iterator + let balance_prefix = token::balance_prefix(&token_addr); + let read_balances = RPC + .shell() + .storage_prefix(&client, None, None, false, &balance_prefix) + .await + .unwrap(); + assert_eq!(read_balances.data.len(), 1); + + // Request storage has key + let has_balance_key = RPC + .shell() + .storage_has_key(&client, &balance_key) + .await + .unwrap(); + assert!(has_balance_key); + + Ok(()) + } +} diff --git a/shared/src/ledger/queries/types.rs b/shared/src/ledger/queries/types.rs new file mode 100644 index 00000000000..e2092b4f6ea --- /dev/null +++ b/shared/src/ledger/queries/types.rs @@ -0,0 +1,172 @@ +use tendermint_proto::crypto::ProofOps; + +use crate::ledger::storage::traits::StorageHasher; +use crate::ledger::storage::{DBIter, Storage, DB}; +use crate::ledger::storage_api; +use crate::types::storage::BlockHeight; +#[cfg(feature = "wasm-runtime")] +use crate::vm::wasm::{TxCache, VpCache}; +#[cfg(feature = "wasm-runtime")] +use crate::vm::WasmCacheRoAccess; + +/// A request context provides read-only access to storage and WASM compilation +/// caches to request handlers. 
+#[derive(Debug, Clone)] +pub struct RequestCtx<'a, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, +{ + /// Storage access + pub storage: &'a Storage<D, H>, + /// VP WASM compilation cache + #[cfg(feature = "wasm-runtime")] + pub vp_wasm_cache: VpCache<WasmCacheRoAccess>, + /// tx WASM compilation cache + #[cfg(feature = "wasm-runtime")] + pub tx_wasm_cache: TxCache<WasmCacheRoAccess>, +} + +/// A `Router` handles parsing read-only query requests and dispatching them to +/// their handler functions. A valid query returns a borsh-encoded result. +pub trait Router { + /// Handle a given request using the provided context. This must be invoked + /// on the root `Router` to be able to match the `request.path` fully. + fn handle<D, H>( + &self, + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + ) -> storage_api::Result<EncodedResponseQuery> + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync, + { + self.internal_handle(ctx, request, 0) + } + + /// Internal method which shouldn't be invoked directly. Instead, you may + /// want to call `self.handle()`. + /// + /// Handle a given request using the provided context, starting to + /// try to match `request.path` against the `Router`'s patterns at the + /// given `start` offset. + fn internal_handle<D, H>( + &self, + ctx: RequestCtx<'_, D, H>, + request: &RequestQuery, + start: usize, + ) -> storage_api::Result<EncodedResponseQuery> + where + D: 'static + DB + for<'iter> DBIter<'iter> + Sync, + H: 'static + StorageHasher + Sync; +} + +/// A client with async request dispatcher method, which can be used to invoke +/// type-safe methods from a root [`Router`], generated via `router!` macro. +#[cfg(any(test, feature = "async-client"))] +#[async_trait::async_trait] +pub trait Client { + /// `std::io::Error` can happen in decoding with + /// `BorshDeserialize::try_from_slice` + type Error: From<std::io::Error>; + + /// Send a simple query request at the given path. For more options, use the + /// `request` method. + async fn simple_request( + &self, + path: String, + ) -> Result<Vec<u8>, Self::Error> { + self.request(path, None, None, false) + .await + .map(|response| response.data) + } + + /// Send a query request at the given path. + async fn request( + &self, + path: String, + data: Option<Vec<u8>>, + height: Option<BlockHeight>, + prove: bool, + ) -> Result<EncodedResponseQuery, Self::Error>; +}
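Editor's sketch (not part of the diff): a `Client` implementor only needs `request`; `simple_request` strips the response down to the raw borsh bytes, which callers decode themselves (the router-generated methods do this decoding automatically). Assuming `client` implements `Client`:

```rust
use borsh::BorshDeserialize;

// Query the latest epoch and borsh-decode the raw bytes; `?` on the
// decode works because `Client::Error: From<std::io::Error>`.
let bytes = client.simple_request("/shell/epoch".to_owned()).await?;
let epoch = Epoch::try_from_slice(&bytes)?;
```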
+/// Temporary domain-type for `tendermint_proto::abci::RequestQuery`, copied +/// from tendermint-rs until we are on a branch that has it included. +#[derive(Clone, PartialEq, Eq, Debug, Default)] +pub struct RequestQuery { + /// Raw query bytes. + /// + /// Can be used with or in lieu of `path`. + pub data: Vec<u8>, + /// Path of the request, like an HTTP `GET` path. + /// + /// Can be used with or in lieu of `data`. + /// + /// Applications MUST interpret `/store` as a query by key on the + /// underlying store. The key SHOULD be specified in the Data field. + /// Applications SHOULD allow queries over specific types like + /// `/accounts/...` or `/votes/...`. + pub path: String, + /// The block height for which the query should be executed. + /// + /// The default `0` returns data for the latest committed block. Note that + /// this is the height of the block containing the application's Merkle + /// root hash, which represents the state as it was after committing + /// the block at `height - 1`. + pub height: BlockHeight, + /// Whether to return a Merkle proof with the response, if possible. + pub prove: bool, +} + +/// Generic response from a query +#[derive(Clone, Debug, Default)] +pub struct ResponseQuery<T> { + /// Response data to be borsh encoded + pub data: T, + /// Non-deterministic log of the request execution + pub info: String, + /// Optional proof - used for storage value reads which request `prove` + pub proof_ops: Option<ProofOps>, +} + +/// [`ResponseQuery`] with borsh-encoded `data` field +pub type EncodedResponseQuery = ResponseQuery<Vec<u8>>; + +impl RequestQuery { + /// Try to convert tendermint RequestQuery into our [`RequestQuery`] + /// domain type. This tries to convert the block height into our + /// [`BlockHeight`] type, where `0` is treated as a special value to signal + /// to use the latest committed block height as per tendermint ABCI Query + /// spec. A negative block height will cause an error. + pub fn try_from_tm<D, H>( + storage: &Storage<D, H>, + tendermint_proto::abci::RequestQuery { + data, + path, + height, + prove, + }: tendermint_proto::abci::RequestQuery, + ) -> Result<Self, String> + where + D: DB + for<'iter> DBIter<'iter>, + H: StorageHasher, + { + let height = match height { + 0 => { + // `0` means last committed height + storage.last_height + } + _ => BlockHeight(height.try_into().map_err(|_| { + format!("Query height cannot be negative, got: {}", height) + })?), + }; + Ok(Self { + data, + path, + height, + prove, + }) + } +}
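Editor's sketch (not part of the diff): per the ABCI convention documented above, a raw query with `height: 0` resolves to the last committed block height during conversion:

```rust
// Hypothetical values; `storage` is the node's `Storage<D, H>`.
let raw = tendermint_proto::abci::RequestQuery {
    data: vec![],
    path: "/shell/epoch".to_owned(),
    height: 0,
    prove: false,
};
let converted = RequestQuery::try_from_tm(&storage, raw).unwrap();
assert_eq!(converted.height, storage.last_height);
```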
 diff --git a/shared/src/ledger/slash_fund/mod.rs b/shared/src/ledger/slash_fund/mod.rs index e5ef72b5fe7..945be4eb643 100644 --- a/shared/src/ledger/slash_fund/mod.rs +++ b/shared/src/ledger/slash_fund/mod.rs @@ -13,7 +13,7 @@ use super::governance::vp::is_proposal_accepted; use super::storage::traits::StorageHasher; use crate::ledger::native_vp::{self, Ctx, NativeVp}; use crate::ledger::storage::{self as ledger_storage}; -use crate::types::address::{xan as nam, Address, InternalAddress}; +use crate::types::address::{nam, Address, InternalAddress}; use crate::types::storage::Key; use crate::types::token; use crate::vm::WasmCacheAccess; diff --git a/shared/src/ledger/storage/mod.rs b/shared/src/ledger/storage/mod.rs index 805caa63486..5ee2f57ffd5 100644 --- a/shared/src/ledger/storage/mod.rs +++ b/shared/src/ledger/storage/mod.rs @@ -24,7 +24,7 @@ use crate::ledger::storage::merkle_tree::{ pub use crate::ledger::storage::merkle_tree::{ MerkleTree, MerkleTreeStoresRead, MerkleTreeStoresWrite, StoreType, }; -use crate::ledger::storage::traits::StorageHasher; +pub use crate::ledger::storage::traits::StorageHasher; use crate::tendermint::merkle::proof::Proof; use crate::types::address::{Address, EstablishedAddressGen, InternalAddress}; use crate::types::chain::{ChainId, CHAIN_ID_LENGTH}; diff --git a/shared/src/types/address.rs b/shared/src/types/address.rs index 74ab4f53fe4..4298cca513e 100644 --- a/shared/src/types/address.rs +++ b/shared/src/types/address.rs @@ -499,7 +499,7 @@ impl Display for InternalAddress { } /// Temporary helper for testing -pub fn xan() -> Address { +pub fn nam() -> Address { Address::decode("atest1v4ehgw36x3prswzxggunzv6pxqmnvdj9xvcyzvpsggeyvs3cg9qnywf589qnwvfsg5erg3fkl09rg5").expect("The token address decoding shouldn't fail") } @@ -537,7 +537,7 @@ pub fn kartoffel() -> Address { /// informal currency codes. pub fn tokens() -> HashMap<Address, &'static str> { vec![ - (xan(), "XAN"), + (nam(), "NAM"), (btc(), "BTC"), (eth(), "ETH"), (dot(), "DOT"), diff --git a/shared/src/types/hash.rs b/shared/src/types/hash.rs index 4d3d01d4c81..99d058967d8 100644 --- a/shared/src/types/hash.rs +++ b/shared/src/types/hash.rs @@ -2,6 +2,7 @@ use std::fmt::{self, Display}; use std::ops::Deref; +use std::str::FromStr; use arse_merkle_tree::traits::Value; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -116,6 +117,14 @@ impl From<Hash> for transaction::Hash { } } +impl FromStr for Hash { + type Err = self::Error; + + fn from_str(str: &str) -> Result<Self, Self::Err> { + Self::try_from(str) + } +} + impl Hash { /// Compute sha256 of some bytes pub fn sha256(data: impl AsRef<[u8]>) -> Self { diff --git a/shared/src/types/mod.rs b/shared/src/types/mod.rs index 5690f00ec9f..78221659b5a 100644 --- a/shared/src/types/mod.rs +++ b/shared/src/types/mod.rs @@ -11,7 +11,6 @@ pub mod ibc; pub mod internal; pub mod keccak; pub mod key; -pub mod nft; pub mod storage; pub mod time; pub mod token; diff --git a/shared/src/types/nft.rs b/shared/src/types/nft.rs deleted file mode 100644 index ebfff7d5cef..00000000000 --- a/shared/src/types/nft.rs +++ /dev/null @@ -1,369 +0,0 @@ -//! Nft types -use std::fmt; - -use borsh::{BorshDeserialize, BorshSerialize}; -use serde::{Deserialize, Serialize}; - -use super::address::Address; -use super::storage::{DbKeySeg, Key, KeySeg}; - -const NFT_KEY: &str = "nft"; -const TAG_KEY: &str = "tag"; -const CREATOR_KEY: &str = "creator"; -const KEYS: &str = "keys"; -const OPTIONAL_KEYS: &str = "optional_keys"; -const METADATA_KEY: &str = "metadata"; -const APPROVALS_KEY: &str = "approvals"; -const BURNT_KEY: &str = "burnt"; -const IDS_KEY: &str = "ids"; -const CURRENT_OWNER_KEY: &str = "current_owner"; -const PAST_OWNERS_KEY: &str = "past_owners"; -const VALUE_KEY: &str = "value"; -const OPTIONAL_VALUE: &str = "optional_value"; - -#[derive( - Debug, - Clone, - BorshSerialize, - BorshDeserialize, - Serialize, - Deserialize, - Eq, - PartialEq, - Hash, - PartialOrd, -)] -/// Nft Version tag -pub enum NftTag { - /// Tag v1 - V1, -} - -impl Default for NftTag { - fn default() -> Self { - Self::V1 - } -} - -impl fmt::Display for NftTag { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - NftTag::V1 => write!(f, "v1"), - } - } -} -#[derive( - Debug, - Clone, - BorshSerialize, - BorshDeserialize, - Serialize, - Deserialize, - Eq, - PartialEq, - Hash, - PartialOrd, -)] -/// The definition of an NFT -pub struct Nft { - #[serde(default)] - /// Nft version - pub tag: NftTag, - /// The source address - pub creator: Address, - /// The path to a file containing the validity predicate associated with - /// the NFT - pub vp_path: Option<String>, - /// Mandatory NFT fields - pub keys: Vec<String>, - #[serde(default = "default_opt_keys")] - /// Optional NFT fields - pub opt_keys: Vec<String>, - /// The list of tokens - pub tokens: Vec<NftToken>, -} - -impl fmt::Display for Nft { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "tag: {}, creator: {}, tokens: {:?}", - self.tag, self.creator, self.tokens - ) - } -} - -#[derive( - Debug, - Clone, - BorshSerialize, - BorshDeserialize, - Serialize, - Deserialize, - Eq, - PartialEq, - Hash, - PartialOrd, -)] -/// The definition of an NFT token -pub struct NftToken { - /// The token id - pub id: u64, - /// The URI containing metadata - pub metadata: String, - /// Current owner - pub current_owner: Option
<Address>, - /// Past owners - #[serde(default = "default_past_owners")] - pub past_owners: Vec
<Address>, - /// Approved addresses - pub approvals: Vec
<Address>, - /// Mandatory fields values - pub values: Vec<String>, - #[serde(default = "default_opt_values")] - /// Optionals fields values - pub opt_values: Vec<String>, - #[serde(default = "default_burnt")] - /// Is token burnt - pub burnt: bool, -} - -impl fmt::Display for NftToken { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "id: {}, metadata: {}, values: {:?}", - self.id, self.metadata, self.values - ) - } -} - -fn default_opt_keys() -> Vec<String> { - Vec::new() -} - -fn default_past_owners() -> Vec<Address>
{ - Vec::new() -} - -fn default_opt_values() -> Vec<String> { - Vec::new() -} - -fn default_burnt() -> bool { - false -} - -/// Get the nft prefix -pub fn _nft_prefix(address: &Address) -> Key { - Key::from(address.to_db_key()) - .push(&NFT_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft token prefix -pub fn _nft_token_prefix(address: &Address, token_id: &str) -> Key { - _nft_prefix(address) - .push(&IDS_KEY.to_owned()) - .expect("Cannot obtain a storage key") - .push(&token_id.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft owner storage key -pub fn get_tag_key(address: &Address) -> Key { - _nft_prefix(address) - .push(&TAG_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft owner storage key -pub fn get_creator_key(address: &Address) -> Key { - _nft_prefix(address) - .push(&CREATOR_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft keys storage key -pub fn get_keys_key(address: &Address) -> Key { - _nft_prefix(address) - .push(&KEYS.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft keys storage key -pub fn get_optional_keys_key(address: &Address) -> Key { - _nft_prefix(address) - .push(&OPTIONAL_KEYS.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft metadata storage key -pub fn get_token_metadata_key(address: &Address, nft_id: &str) -> Key { - _nft_token_prefix(address, nft_id) - .push(&METADATA_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft current_owner storage key -pub fn get_token_current_owner_key(address: &Address, nft_id: &str) -> Key { - _nft_token_prefix(address, nft_id) - .push(&CURRENT_OWNER_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft current_owner storage key -pub fn get_token_past_owners_key(address: &Address, nft_id: &str) -> Key { - _nft_token_prefix(address, nft_id) - .push(&PAST_OWNERS_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft value storage key -pub fn get_token_value_key(address: &Address, nft_id: &str) -> Key { - _nft_token_prefix(address, nft_id) - .push(&VALUE_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft optional value storage key -pub fn get_token_optional_value_key(address: &Address, nft_id: &str) -> Key { - _nft_token_prefix(address, nft_id) - .push(&OPTIONAL_VALUE.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft burnt storage key -pub fn get_token_burnt_key(address: &Address, nft_id: &str) -> Key { - _nft_token_prefix(address, nft_id) - .push(&BURNT_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Get the nft approval storage key -pub fn get_token_approval_key(address: &Address, nft_id: &str) -> Key { - _nft_token_prefix(address, nft_id) - .push(&APPROVALS_KEY.to_owned()) - .expect("Cannot obtain a storage key") -} - -/// Check that nft is created by a specific creator address -pub fn is_nft_creator_key(key: &Key, address: &Address) -> Option<Address>
{ - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(nft_addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(creator_key), - ] if nft_addr == address - && prefix == NFT_KEY - && creator_key == CREATOR_KEY => - { - Some(nft_addr.to_owned()) - } - _ => None, - } -} - -/// Check that a particular key is a approval storage key -pub fn is_nft_approval_key( - key: &Key, - address: &Address, -) -> Option<(Address, String)> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(nft_addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(ids_key), - DbKeySeg::StringSeg(token_id_key), - DbKeySeg::StringSeg(approval_key), - ] if nft_addr == address - && prefix == NFT_KEY - && ids_key == IDS_KEY - && approval_key == APPROVALS_KEY => - { - Some((nft_addr.to_owned(), token_id_key.to_owned())) - } - _ => None, - } -} - -/// Check that a particular key is a metadata storage key -pub fn is_nft_metadata_key( - key: &Key, - address: &Address, -) -> Option<(Address, String)> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(nft_addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(ids_key), - DbKeySeg::StringSeg(token_id_key), - DbKeySeg::StringSeg(metadata_key), - ] if nft_addr == address - && prefix == NFT_KEY - && ids_key == IDS_KEY - && metadata_key == METADATA_KEY => - { - Some((nft_addr.to_owned(), token_id_key.to_owned())) - } - _ => None, - } -} - -/// Check that a particular key is a current_owner storage key -pub fn is_nft_current_owner_key( - key: &Key, - address: &Address, -) -> Option<(Address, String)> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(nft_addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(ids_key), - DbKeySeg::StringSeg(token_id_key), - DbKeySeg::StringSeg(current_owner_key), - ] if nft_addr == address - && prefix == NFT_KEY - && ids_key == IDS_KEY - && current_owner_key == CURRENT_OWNER_KEY => - { - Some((nft_addr.to_owned(), token_id_key.to_owned())) - } - _ => None, - } -} - -/// Check that a particular key is a past_owners storage key -pub fn is_nft_past_owners_key( - key: &Key, - address: &Address, -) -> Option<(Address, String)> { - match &key.segments[..] { - [ - DbKeySeg::AddressSeg(nft_addr), - DbKeySeg::StringSeg(prefix), - DbKeySeg::StringSeg(ids_key), - DbKeySeg::StringSeg(token_id_key), - DbKeySeg::StringSeg(past_owners_key), - ] if nft_addr == address - && prefix == NFT_KEY - && ids_key == IDS_KEY - && past_owners_key == PAST_OWNERS_KEY => - { - Some((nft_addr.to_owned(), token_id_key.to_owned())) - } - _ => None, - } -} - -/// Check that a key points to a nft storage key -pub fn is_nft_key(key: &Key) -> Option<&Address> { - match &key.segments[..] 
{ - [ - DbKeySeg::AddressSeg(nft_addr), - DbKeySeg::StringSeg(prefix), - .., - ] if prefix == NFT_KEY => Some(nft_addr), - _ => None, - } -} diff --git a/shared/src/types/transaction/mod.rs b/shared/src/types/transaction/mod.rs index b9fabcfd6b9..fc0c54c2381 100644 --- a/shared/src/types/transaction/mod.rs +++ b/shared/src/types/transaction/mod.rs @@ -7,8 +7,6 @@ pub mod decrypted; pub mod encrypted; /// txs to manage governance pub mod governance; -/// txs to manage nfts -pub mod nft; pub mod pos; /// transaction protocols made by validators pub mod protocol; @@ -348,7 +346,7 @@ pub mod tx_types { #[cfg(test)] mod test_process_tx { use super::*; - use crate::types::address::xan; + use crate::types::address::nam; use crate::types::storage::Epoch; fn gen_keypair() -> common::SecretKey { @@ -432,7 +430,7 @@ pub mod tx_types { let wrapper = WrapperTx::new( Fee { amount: 10.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -467,7 +465,7 @@ pub mod tx_types { let wrapper = WrapperTx::new( Fee { amount: 10.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), diff --git a/shared/src/types/transaction/nft.rs b/shared/src/types/transaction/nft.rs deleted file mode 100644 index 10d5ffa43f0..00000000000 --- a/shared/src/types/transaction/nft.rs +++ /dev/null @@ -1,49 +0,0 @@ -use borsh::{BorshDeserialize, BorshSerialize}; -use serde::{Deserialize, Serialize}; - -use crate::types::address::Address; -use crate::types::nft::NftToken; - -/// A tx data type to create a new NFT -#[derive( - Debug, - Clone, - PartialEq, - BorshSerialize, - BorshDeserialize, - Serialize, - Deserialize, -)] -pub struct CreateNft { - /// Nft version - pub tag: String, - /// The creator address - pub creator: Address, - /// The nft vp code - pub vp_code: Vec<u8>, - /// The nft keys - pub keys: Vec<String>, - /// The nft optional keys - pub opt_keys: Vec<String>, - /// The nft tokens descriptions - pub tokens: Vec<NftToken>, -} - -/// A tx data type to mint nft tokens -#[derive( - Debug, - Clone, - PartialEq, - BorshSerialize, - BorshDeserialize, - Serialize, - Deserialize, -)] -pub struct MintNft { - /// The nft address - pub address: Address, - /// The creator address - pub creator: Address, - /// The nft tokens - pub tokens: Vec<NftToken>, -} diff --git a/shared/src/types/transaction/wrapper.rs b/shared/src/types/transaction/wrapper.rs index ad27b807aae..e0e6dab6be6 100644 --- a/shared/src/types/transaction/wrapper.rs +++ b/shared/src/types/transaction/wrapper.rs @@ -337,7 +337,7 @@ pub mod wrapper_tx { mod test_wrapper_tx { use super::*; use crate::proto::SignedTxData; - use crate::types::address::xan; + use crate::types::address::nam; fn gen_keypair() -> common::SecretKey { use rand::prelude::ThreadRng; @@ -360,7 +360,7 @@ pub mod wrapper_tx { let wrapper = WrapperTx::new( Fee { amount: 10.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), @@ -386,7 +386,7 @@ pub mod wrapper_tx { let mut wrapper = WrapperTx::new( Fee { amount: 10.into(), - token: xan(), + token: nam(), }, &gen_keypair(), Epoch(0), @@ -418,7 +418,7 @@ pub mod wrapper_tx { let mut tx = WrapperTx::new( Fee { amount: 10.into(), - token: xan(), + token: nam(), }, &keypair, Epoch(0), diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 5fc1db6da0f..39fe9bcf1f5 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_tests" resolver = "2" -version = "0.8.1" +version = "0.9.0" [features] default = ["wasm-runtime"] diff --git a/tests/src/e2e/helpers.rs b/tests/src/e2e/helpers.rs index 
705c8227609..d0dd60a6885 100644 --- a/tests/src/e2e/helpers.rs +++ b/tests/src/e2e/helpers.rs @@ -3,6 +3,7 @@ use std::path::Path; use std::process::Command; use std::str::FromStr; +use std::time::{Duration, Instant}; use std::{env, time}; use color_eyre::eyre::Result; @@ -14,7 +15,7 @@ use namada::types::key::*; use namada::types::storage::Epoch; use namada_apps::config::{Config, TendermintMode}; -use super::setup::{Test, ENV_VAR_DEBUG, ENV_VAR_USE_PREBUILT_BINARIES}; +use super::setup::{sleep, Test, ENV_VAR_DEBUG, ENV_VAR_USE_PREBUILT_BINARIES}; use crate::e2e::setup::{Bin, Who, APPS_PACKAGE}; use crate::run; @@ -148,6 +149,63 @@ pub fn get_epoch(test: &Test, ledger_address: &str) -> Result<Epoch> { Ok(Epoch(epoch)) } +/// Get the last committed block height. +pub fn get_height(test: &Test, ledger_address: &str) -> Result<u64> { + let mut find = run!( + test, + Bin::Client, + &["block", "--ledger-address", ledger_address], + Some(10) + )?; + let (unread, matched) = find.exp_regex("Last committed block ID: .*")?; + // Expected `matched` string is e.g.: + // + // ``` + // Last committed block F10B5E77F972F68CA051D289474B6E75574B446BF713A7B7B71D7ECFC61A3B21, height: 4, time: 2022-10-20T10:52:28.828745Z + // ``` + let height_str = strip_trailing_newline(&matched) + .trim() + // Find the height part ... + .split_once("height: ") + .unwrap() + // ... take what's after it ... + .1 + // ... find the next comma ... + .rsplit_once(',') + .unwrap() + // ... and take what's before it. + .0; + u64::from_str(height_str).map_err(|e| { + eyre!(format!( + "Height parsing failed from {} trimmed from {}, Error: \ {}\n\nUnread output: {}", + height_str, matched, e, unread + )) + }) +} + +/// Sleep until the given height is reached, or panic if the timeout is +/// reached before the height is seen. +pub fn wait_for_block_height( + test: &Test, + ledger_address: &str, + height: u64, + timeout_secs: u64, +) -> Result<()> { + let start = Instant::now(); + let loop_timeout = Duration::new(timeout_secs, 0); + loop { + let current = get_height(test, ledger_address)?; + if current >= height { + break Ok(()); + } + if Instant::now().duration_since(start) > loop_timeout { + panic!("Timed out waiting for height {height}, current {current}"); + } + sleep(1); + } +} + pub fn generate_bin_command(bin_name: &str, manifest_path: &Path) -> Command { let use_prebuilt_binaries = match env::var(ENV_VAR_USE_PREBUILT_BINARIES) { Ok(var) => var.to_ascii_lowercase() != "false", diff --git a/tests/src/e2e/ledger_tests.rs b/tests/src/e2e/ledger_tests.rs index 45bf8392ae3..c5084d69181 100644 --- a/tests/src/e2e/ledger_tests.rs +++ b/tests/src/e2e/ledger_tests.rs @@ -24,6 +24,7 @@ use namada_apps::config::genesis::genesis_config::{ use serde_json::json; use setup::constants::*; +use super::helpers::{get_height, wait_for_block_height}; use super::setup::{disable_eth_fullnode, get_all_wasms_hashes}; use crate::e2e::helpers::{ find_address, find_voting_power, get_actor_rpc, get_epoch, @@ -105,7 +106,7 @@ fn test_node_connectivity() -> Result<()> { "--target", ALBERT, "--token", - XAN, + NAM, "--amount", "10.1", "--fee-amount", "0", "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -134,25 +135,31 @@ fn test_node_connectivity() -> Result<()> { let _bg_validator_0 = validator_0.background(); let _bg_validator_1 = validator_1.background(); + let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_1_rpc = get_actor_rpc(&test, 
&Who::Validator(1)); + let non_validator_rpc = get_actor_rpc(&test, &Who::NonValidator); + + // Find the block height on the validator + let after_tx_height = get_height(&test, &validator_0_rpc)?; + + // Wait for the non-validator to be synced to at least the same height + wait_for_block_height(&test, &non_validator_rpc, after_tx_height, 10)?; + let query_balance_args = |ledger_rpc| { vec![ "balance", "--owner", ALBERT, "--token", - XAN, + NAM, "--ledger-address", ledger_rpc, ] }; - - let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); - let validator_1_rpc = get_actor_rpc(&test, &Who::Validator(1)); - let non_validator_rpc = get_actor_rpc(&test, &Who::NonValidator); for ledger_rpc in &[validator_0_rpc, validator_1_rpc, non_validator_rpc] { let mut client = run!(test, Bin::Client, query_balance_args(ledger_rpc), Some(40))?; - client.exp_string("XAN: 1000010.1")?; + client.exp_string("NAM: 1000010.1")?; client.assert_success(); } @@ -301,7 +308,7 @@ fn ledger_txs_and_queries() -> Result<()> { "--target", ALBERT, "--token", - XAN, + NAM, "--amount", "10.1", "--fee-amount", @@ -309,7 +316,7 @@ fn ledger_txs_and_queries() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ], @@ -326,7 +333,7 @@ fn ledger_txs_and_queries() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ], @@ -344,7 +351,7 @@ fn ledger_txs_and_queries() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc ], @@ -365,7 +372,7 @@ fn ledger_txs_and_queries() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ], @@ -397,12 +404,12 @@ fn ledger_txs_and_queries() -> Result<()> { "--owner", BERTHA, "--token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ], // expect a decimal - r"XAN: \d+(\.\d+)?", + r"NAM: \d+(\.\d+)?", ), ]; for (query_args, expected) in &query_args_and_expected_response { @@ -414,8 +421,8 @@ fn ledger_txs_and_queries() -> Result<()> { let christel = find_address(&test, CHRISTEL)?; // as setup in `genesis/e2e-tests-single-node.toml` let christel_balance = token::Amount::whole(1000000); - let xan = find_address(&test, XAN)?; - let storage_key = token::balance_key(&xan, &christel).to_string(); + let nam = find_address(&test, NAM)?; + let storage_key = token::balance_key(&nam, &christel).to_string(); let query_args_and_expected_response = vec![ // 7. 
Query storage key and get hex-encoded raw bytes ( @@ -466,7 +473,7 @@ fn invalid_transactions() -> Result<()> { let transfer = token::Transfer { source: find_address(&test, DAEWON)?, target: find_address(&test, ALBERT)?, - token: find_address(&test, XAN)?, + token: find_address(&test, NAM)?, sub_prefix: None, amount: token::Amount::whole(1), }; @@ -493,7 +500,7 @@ fn invalid_transactions() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -547,7 +554,7 @@ fn invalid_transactions() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, // Force to ignore client check that fails on the balance check of the // source address "--force", @@ -625,7 +632,7 @@ fn pos_bonds() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -649,7 +656,7 @@ fn pos_bonds() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -670,7 +677,7 @@ fn pos_bonds() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -694,7 +701,7 @@ fn pos_bonds() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -735,7 +742,7 @@ fn pos_bonds() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -757,7 +764,7 @@ fn pos_bonds() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -774,7 +781,7 @@ fn pos_bonds() -> Result<()> { /// 1. Run the ledger node with shorter epochs for faster progression /// 2. Initialize a new validator account /// 3. Submit a delegation to the new validator -/// 4. Transfer some XAN to the new validator +/// 4. Transfer some NAM to the new validator /// 5. Submit a self-bond for the new validator /// 6. Wait for the pipeline epoch /// 7. Check the new validator's voting power @@ -829,7 +836,7 @@ fn pos_init_validator() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -847,7 +854,7 @@ fn pos_init_validator() -> Result<()> { "--target", &new_validator_key, "--token", - XAN, + NAM, "--amount", "0.5", "--fee-amount", @@ -855,7 +862,7 @@ fn pos_init_validator() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -877,7 +884,7 @@ fn pos_init_validator() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -886,7 +893,7 @@ fn pos_init_validator() -> Result<()> { client.exp_string("Transaction is valid.")?; client.assert_success(); - // 4. Transfer some XAN to the new validator + // 4. 
Transfer some NAM to the new validator let tx_args = vec![ "transfer", "--source", @@ -894,7 +901,7 @@ fn pos_init_validator() -> Result<()> { "--target", new_validator, "--token", - XAN, + NAM, "--amount", "10999.5", "--fee-amount", @@ -902,7 +909,7 @@ fn pos_init_validator() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -923,7 +930,7 @@ fn pos_init_validator() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -994,7 +1001,7 @@ fn ledger_many_txs_in_a_block() -> Result<()> { "--target", ALBERT, "--token", - XAN, + NAM, "--amount", "10.1", "--fee-amount", @@ -1002,7 +1009,7 @@ fn ledger_many_txs_in_a_block() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", ]); @@ -1113,7 +1120,7 @@ fn proposal_submission() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -1186,13 +1193,13 @@ fn proposal_submission() -> Result<()> { "--owner", ALBERT, "--token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("XAN: 999500")?; + client.exp_string("NAM: 999500")?; client.assert_success(); // 5. Query token balance governance @@ -1201,13 +1208,13 @@ fn proposal_submission() -> Result<()> { "--owner", GOVERNANCE_ADDRESS, "--token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("XAN: 500")?; + client.exp_string("NAM: 500")?; client.assert_success(); // 6. Submit an invalid proposal @@ -1288,13 +1295,13 @@ fn proposal_submission() -> Result<()> { "--owner", ALBERT, "--token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(40))?; - client.exp_string("XAN: 999500")?; + client.exp_string("NAM: 999500")?; client.assert_success(); // 9. Send a yay vote from a validator @@ -1396,13 +1403,13 @@ fn proposal_submission() -> Result<()> { "--owner", ALBERT, "--token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("XAN: 1000000")?; + client.exp_string("NAM: 1000000")?; client.assert_success(); // 13. Check if governance funds are 0 @@ -1411,13 +1418,13 @@ fn proposal_submission() -> Result<()> { "--owner", GOVERNANCE_ADDRESS, "--token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; let mut client = run!(test, Bin::Client, query_balance_args, Some(30))?; - client.exp_string("XAN: 0")?; + client.exp_string("NAM: 0")?; client.assert_success(); // // 14. Query parameters @@ -1469,7 +1476,7 @@ fn proposal_offline() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -1901,7 +1908,7 @@ fn test_genesis_validators() -> Result<()> { let bg_validator_0 = validator_0.background(); let bg_validator_1 = validator_1.background(); - let bg_non_validator = non_validator.background(); + let _bg_non_validator = non_validator.background(); // 4. 
Submit a valid token transfer tx let validator_one_rpc = get_actor_rpc(&test, &Who::Validator(0)); @@ -1912,7 +1919,7 @@ fn test_genesis_validators() -> Result<()> { "--target", validator_1_alias, "--token", - XAN, + NAM, "--amount", "10.1", "--fee-amount", @@ -1920,7 +1927,7 @@ fn test_genesis_validators() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; @@ -1933,12 +1940,42 @@ fn test_genesis_validators() -> Result<()> { // 3. Check that all the nodes processed the tx with the same result let mut validator_0 = bg_validator_0.foreground(); let mut validator_1 = bg_validator_1.foreground(); - let mut non_validator = bg_non_validator.foreground(); let expected_result = "all VPs accepted transaction"; + // We cannot check this on non-validator node as it might sync without + // applying the tx itself, but its state should be the same, checked below. validator_0.exp_string(expected_result)?; validator_1.exp_string(expected_result)?; - non_validator.exp_string(expected_result)?; + let _bg_validator_0 = validator_0.background(); + let _bg_validator_1 = validator_1.background(); + + let validator_0_rpc = get_actor_rpc(&test, &Who::Validator(0)); + let validator_1_rpc = get_actor_rpc(&test, &Who::Validator(1)); + let non_validator_rpc = get_actor_rpc(&test, &Who::NonValidator); + + // Find the block height on the validator + let after_tx_height = get_height(&test, &validator_0_rpc)?; + + // Wait for the non-validator to be synced to at least the same height + wait_for_block_height(&test, &non_validator_rpc, after_tx_height, 10)?; + + let query_balance_args = |ledger_rpc| { + vec![ + "balance", + "--owner", + validator_1_alias, + "--token", + NAM, + "--ledger-address", + ledger_rpc, + ] + }; + for ledger_rpc in &[validator_0_rpc, validator_1_rpc, non_validator_rpc] { + let mut client = + run!(test, Bin::Client, query_balance_args(ledger_rpc), Some(40))?; + client.exp_string("NAM: 1000000000010.1")?; + client.assert_success(); + } Ok(()) } @@ -2060,7 +2097,7 @@ fn double_signing_gets_slashed() -> Result<()> { "--target", ALBERT, "--token", - XAN, + NAM, "--amount", "10.1", "--fee-amount", @@ -2068,7 +2105,7 @@ fn double_signing_gets_slashed() -> Result<()> { "--gas-limit", "0", "--fee-token", - XAN, + NAM, "--ledger-address", &validator_one_rpc, ]; diff --git a/tests/src/e2e/setup.rs b/tests/src/e2e/setup.rs index c80f2c4d13e..ae28e5b8efb 100644 --- a/tests/src/e2e/setup.rs +++ b/tests/src/e2e/setup.rs @@ -797,7 +797,7 @@ pub mod constants { pub const GOVERNANCE_ADDRESS: &str = "governance"; // Fungible token addresses - pub const XAN: &str = "XAN"; + pub const NAM: &str = "NAM"; pub const BTC: &str = "BTC"; pub const ETH: &str = "ETH"; pub const DOT: &str = "DOT"; diff --git a/tx_prelude/Cargo.toml b/tx_prelude/Cargo.toml index 992a2146e17..11449b94608 100644 --- a/tx_prelude/Cargo.toml +++ b/tx_prelude/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_tx_prelude" resolver = "2" -version = "0.8.1" +version = "0.9.0" [features] default = [] diff --git a/tx_prelude/src/governance.rs b/tx_prelude/src/governance.rs index 2dbab74a9e5..bb0e6cb6f9e 100644 --- a/tx_prelude/src/governance.rs +++ b/tx_prelude/src/governance.rs @@ -2,7 +2,7 @@ use namada::ledger::governance::storage; use namada::ledger::governance::vp::ADDRESS as governance_address; -use namada::types::address::xan as m1t; +use namada::types::address::nam; use namada::types::token::Amount; use namada::types::transaction::governance::{ InitProposalData, 
VoteProposalData, @@ -59,7 +59,7 @@ pub fn init_proposal(ctx: &mut Ctx, data: InitProposalData) -> TxResult { ctx, &data.author, &governance_address, - &m1t(), + &nam(), None, min_proposal_funds, ) diff --git a/tx_prelude/src/lib.rs b/tx_prelude/src/lib.rs index 730adb3155b..b512472a808 100644 --- a/tx_prelude/src/lib.rs +++ b/tx_prelude/src/lib.rs @@ -8,7 +8,6 @@ pub mod governance; pub mod ibc; -pub mod nft; pub mod proof_of_stake; pub mod token; diff --git a/tx_prelude/src/nft.rs b/tx_prelude/src/nft.rs deleted file mode 100644 index 4ed179fe277..00000000000 --- a/tx_prelude/src/nft.rs +++ /dev/null @@ -1,89 +0,0 @@ -use namada::types::address::Address; -use namada::types::nft; -use namada::types::nft::NftToken; -use namada::types::transaction::nft::{CreateNft, MintNft}; - -use super::*; - -/// Initialize a new NFT token address. -pub fn init_nft(ctx: &mut Ctx, nft: CreateNft) -> EnvResult
<Address> { - let address = ctx.init_account(&nft.vp_code)?; - - // write tag - let tag_key = nft::get_tag_key(&address); - ctx.write(&tag_key, &nft.tag)?; - - // write creator - let creator_key = nft::get_creator_key(&address); - ctx.write(&creator_key, &nft.creator)?; - - // write keys - let keys_key = nft::get_keys_key(&address); - ctx.write(&keys_key, &nft.keys)?; - - // write optional keys - let optional_keys_key = nft::get_optional_keys_key(&address); - ctx.write(&optional_keys_key, nft.opt_keys)?; - - // mint tokens - aux_mint_token(ctx, &address, &nft.creator, nft.tokens, &nft.creator)?; - - ctx.insert_verifier(&nft.creator)?; - - Ok(address) -} - -pub fn mint_tokens(ctx: &mut Ctx, nft: MintNft) -> TxResult { - aux_mint_token(ctx, &nft.address, &nft.creator, nft.tokens, &nft.creator) -} - -fn aux_mint_token( - ctx: &mut Ctx, - nft_address: &Address, - creator_address: &Address, - tokens: Vec<NftToken>, - verifier: &Address, -) -> TxResult { - for token in tokens { - // write token metadata - let metadata_key = - nft::get_token_metadata_key(nft_address, &token.id.to_string()); - ctx.write(&metadata_key, &token.metadata)?; - - // write current owner token as creator - let current_owner_key = nft::get_token_current_owner_key( - nft_address, - &token.id.to_string(), - ); - ctx.write( - &current_owner_key, - &token - .current_owner - .unwrap_or_else(|| creator_address.clone()), - )?; - - // write value key - let value_key = - nft::get_token_value_key(nft_address, &token.id.to_string()); - ctx.write(&value_key, &token.values)?; - - // write optional value keys - let optional_value_key = nft::get_token_optional_value_key( - nft_address, - &token.id.to_string(), - ); - ctx.write(&optional_value_key, &token.opt_values)?; - - // write approval addresses - let approval_key = - nft::get_token_approval_key(nft_address, &token.id.to_string()); - ctx.write(&approval_key, &token.approvals)?; - - // write burnt propriety - let burnt_key = - nft::get_token_burnt_key(nft_address, &token.id.to_string()); - ctx.write(&burnt_key, token.burnt)?; - } - ctx.insert_verifier(verifier)?; - Ok(()) -} diff --git a/vm_env/Cargo.toml b/vm_env/Cargo.toml index f2c8854f851..d3da9c79e0b 100644 --- a/vm_env/Cargo.toml +++ b/vm_env/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_vm_env" resolver = "2" -version = "0.8.1" +version = "0.9.0" [features] default = ["abciplus"] diff --git a/vp_prelude/Cargo.toml b/vp_prelude/Cargo.toml index f270a17d9fb..15a0741a810 100644 --- a/vp_prelude/Cargo.toml +++ b/vp_prelude/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_vp_prelude" resolver = "2" -version = "0.8.1" +version = "0.9.0" [features] default = [] diff --git a/vp_prelude/src/lib.rs b/vp_prelude/src/lib.rs index e6618bc5de7..6bffb07d7eb 100644 --- a/vp_prelude/src/lib.rs +++ b/vp_prelude/src/lib.rs @@ -7,7 +7,6 @@ #![deny(rustdoc::private_intra_doc_links)] pub mod key; -pub mod nft; pub mod token; // used in the VP input diff --git a/vp_prelude/src/nft.rs b/vp_prelude/src/nft.rs deleted file mode 100644 index 1d5d0191690..00000000000 --- a/vp_prelude/src/nft.rs +++ /dev/null @@ -1,116 +0,0 @@ -//! 
NFT validity predicate - -use std::collections::BTreeSet; - -use namada::ledger::native_vp::VpEnv; -use namada::types::address::Address; -pub use namada::types::nft::*; -use namada::types::storage::Key; - -use super::{accept, reject, Ctx, EnvResult, VpResult}; - -enum KeyType { - Metadata(Address, String), - Approval(Address, String), - CurrentOwner(Address, String), - Creator(Address), - PastOwners(Address, String), - Unknown, -} - -pub fn vp( - ctx: &Ctx, - _tx_da_ta: Vec<u8>, - nft_address: &Address, - keys_changed: &BTreeSet<Key>, - verifiers: &BTreeSet
<Address>, -) -> VpResult { - for key in keys_changed { - match get_key_type(key, nft_address) { - KeyType::Creator(_creator_addr) => { - super::log_string("creator cannot be changed."); - return reject(); - } - KeyType::Approval(nft_address, token_id) => { - super::log_string(format!( - "nft vp, checking approvals with token id: {}", - token_id - )); - - if !(is_creator(ctx, &nft_address, verifiers)? - || is_approved( - ctx, - &nft_address, - token_id.as_ref(), - verifiers, - )?) - { - return reject(); - } - } - KeyType::Metadata(nft_address, token_id) => { - super::log_string(format!( - "nft vp, checking if metadata changed: {}", - token_id - )); - if !is_creator(ctx, &nft_address, verifiers)? { - return reject(); - } - } - _ => { - if !is_creator(ctx, nft_address, verifiers)? { - return reject(); - } - } - } - } - accept() -} - -fn is_approved( - ctx: &Ctx, - nft_address: &Address, - nft_token_id: &str, - verifiers: &BTreeSet
<Address>, -) -> EnvResult<bool> { - let approvals_key = get_token_approval_key(nft_address, nft_token_id); - let approval_addresses: Vec
<Address> = - ctx.read_pre(&approvals_key)?.unwrap_or_default(); - return Ok(approval_addresses - .iter() - .any(|addr| verifiers.contains(addr))); -} - -fn is_creator( - ctx: &Ctx, - nft_address: &Address, - verifiers: &BTreeSet
<Address>, -) -> EnvResult<bool> { - let creator_key = get_creator_key(nft_address); - let creator_address: Address = ctx.read_pre(&creator_key)?.unwrap(); - Ok(verifiers.contains(&creator_address)) -} - -fn get_key_type(key: &Key, nft_address: &Address) -> KeyType { - let is_creator_key = is_nft_creator_key(key, nft_address); - let is_metadata_key = is_nft_metadata_key(key, nft_address); - let is_approval_key = is_nft_approval_key(key, nft_address); - let is_current_owner_key = is_nft_current_owner_key(key, nft_address); - let is_past_owner_key = is_nft_past_owners_key(key, nft_address); - if let Some(nft_address) = is_creator_key { - return KeyType::Creator(nft_address); - } - if let Some((nft_address, token_id)) = is_metadata_key { - return KeyType::Metadata(nft_address, token_id); - } - if let Some((nft_address, token_id)) = is_approval_key { - return KeyType::Approval(nft_address, token_id); - } - if let Some((nft_address, token_id)) = is_current_owner_key { - return KeyType::CurrentOwner(nft_address, token_id); - } - if let Some((nft_address, token_id)) = is_past_owner_key { - return KeyType::PastOwners(nft_address, token_id); - } - KeyType::Unknown -} diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index e8a72f19d2b..b8dd38f4176 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -1471,10 +1471,11 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.8.1" +version = "0.9.0" dependencies = [ "ark-bls12-381", "ark-serialize", + "async-trait", "bech32", "borsh", "chrono", @@ -1495,12 +1496,14 @@ dependencies = [ "namada_proof_of_stake", "num-rational", "parity-wasm", + "paste", "proptest", "prost", "prost-types", "pwasm-utils", "rand", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde", "serde_json", @@ -1525,7 +1528,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.8.1" +version = "0.9.0" dependencies = [ "quote", "syn", @@ -1533,7 +1536,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "derivative", @@ -1543,7 +1546,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.8.1" +version = "0.9.0" dependencies = [ "chrono", "concat-idents", @@ -1562,7 +1565,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -1574,7 +1577,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -1582,7 +1585,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -1594,7 +1597,7 @@ dependencies = [ [[package]] name = "namada_wasm" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "getrandom", @@ -2549,7 +2552,7 @@ dependencies = [ [[package]] name = "tendermint" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "async-trait", "bytes", @@ -2577,7 +2580,7 @@ dependencies = [ [[package]] name = "tendermint-light-client-verifier" version = "0.23.6" -source = 
"git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "derive_more", "flex-error", @@ -2589,7 +2592,7 @@ dependencies = [ [[package]] name = "tendermint-proto" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "bytes", "flex-error", @@ -2606,7 +2609,7 @@ dependencies = [ [[package]] name = "tendermint-testgen" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "ed25519-dalek", "gumdrop", @@ -2764,7 +2767,7 @@ dependencies = [ [[package]] name = "tx_template" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "getrandom", @@ -2829,7 +2832,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vp_template" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "getrandom", diff --git a/wasm/Cargo.toml b/wasm/Cargo.toml index d6f164a445e..086fa05caee 100644 --- a/wasm/Cargo.toml +++ b/wasm/Cargo.toml @@ -14,10 +14,10 @@ borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223 borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} # patched to a commit on the `eth-bridge-integration` branch of our fork -tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} +tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} # patched to a commit on the `eth-bridge-integration` branch of our fork ibc = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"} diff --git a/wasm/checksums.json b/wasm/checksums.json index 
19b8b54b524..04cd993f91f 100644 --- a/wasm/checksums.json +++ b/wasm/checksums.json @@ -1,19 +1,16 @@ { - "tx_bond.wasm": "tx_bond.482d444214f61b51df344da56f24cd0ca392e699e18c49d2d27b9f108ef643bd.wasm", + "tx_bond.wasm": "tx_bond.270e089c71433a61cf2da72417aaf1e0bb21b2a34f0d2dae29c70b21c0133caa.wasm", "tx_bridge_pool.wasm": "tx_bridge_pool.e21563260c03cfdab1f195878f49bf93722027ad26fcd097cfebbc5c4d279082.wasm", - "tx_ibc.wasm": "tx_ibc.b5a3bf6ca1dea0767406d64251928816a7d95b974cff09f6e702a8f3fbd64b1f.wasm", - "tx_init_account.wasm": "tx_init_account.343e04328e157514ec85cfb650cad5cad659eac27e80b1a0dec61286352a3c9d.wasm", - "tx_init_nft.wasm": "tx_init_nft.ea5ace3004d4d63b6a648bf2d16c94c95da65f85c4bc20a77f940b9cdfe346e9.wasm", - "tx_init_proposal.wasm": "tx_init_proposal.c8e187e2bd7869253f9d9d7de5fa2cb5896e9ca114f124ad800131c9e82db1a7.wasm", - "tx_init_validator.wasm": "tx_init_validator.23b5d73ff65718b5bc9f51423fad36c176dcde9e1006fc7c37cd8e64aae9b8b3.wasm", - "tx_mint_nft.wasm": "tx_mint_nft.110baf82d92f78ca740750808237ecd4793e24b662876f363f51c5d1cd030694.wasm", - "tx_transfer.wasm": "tx_transfer.07335720d2ab07311219a81fd6baca5801792dc16b7c9bab490e2b756257a6dd.wasm", - "tx_unbond.wasm": "tx_unbond.f8879ee80dadf71bd663d46abbd39b47b4c59a3603d29465cf8cff1acbdfa9d3.wasm", - "tx_update_vp.wasm": "tx_update_vp.d2743de89548f3ae6decf2a32ab086e960be9b954bdd24bd6e8e731195449540.wasm", - "tx_vote_proposal.wasm": "tx_vote_proposal.348c3c28fc9e7356a7106f4b037a0bc7b6b207761b000ad0e0cb786678afbee5.wasm", - "tx_withdraw.wasm": "tx_withdraw.a2a0a3f9eb961cba5bb4d1677805bafdcc807637fbd203f8afaa2aa2adb6857e.wasm", - "vp_nft.wasm": "vp_nft.e88e46e49cbbc28dd1fc4e518195bffc4d1feb43b4976d02580865298fd29e75.wasm", - "vp_testnet_faucet.wasm": "vp_testnet_faucet.f6b3d44133b0c35cdbfbe19328d8cdfc62809bd30a0c64eef57f3877cc7e8c2f.wasm", - "vp_token.wasm": "vp_token.2100aaa1fed90d35e87aace6516794facdd7aab3062102c1a762bef604c98075.wasm", - "vp_user.wasm": "vp_user.14fdcbaa1bd3c28115a3eb1f53802b4042080bb255e276b15d2fb16338aacf31.wasm" + "tx_ibc.wasm": "tx_ibc.08919fb46124efd7116214c5ea65c3b4130f0eab3eac17849c3807eb21198f7d.wasm", + "tx_init_account.wasm": "tx_init_account.ec26f77dbdd4a40c65c22c1d88ada359c06d262b6b4833f2e7cdf8574e934e4d.wasm", + "tx_init_proposal.wasm": "tx_init_proposal.e1bbf5715597be42e6bcf8a633de49a5b61b4797cdd599551b9de4df17fad3af.wasm", + "tx_init_validator.wasm": "tx_init_validator.0388157980a96fd7d76072182fe21a978e1ba913db657202412b93c8614d8a10.wasm", + "tx_transfer.wasm": "tx_transfer.b6f60e5a944309ccc640fe5db81a4c9fe24b213bf2cd82cce82799287f112018.wasm", + "tx_unbond.wasm": "tx_unbond.4acb715528fe067791d433ce133226b9af44a199b7289cd5df34cbf6e3b9c5d1.wasm", + "tx_update_vp.wasm": "tx_update_vp.b4100e61f9960c56eac90db86cfc6200927dabe499ac43cea284605020117b20.wasm", + "tx_vote_proposal.wasm": "tx_vote_proposal.29d1c9f2c4a9b9cd9a8cc0c9b8f99cad1eeea0b3a21945e25250729bbc028c80.wasm", + "tx_withdraw.wasm": "tx_withdraw.a7b18837c92156a11328193df4206bcef9e9fc7860cdc7ed11deb588e233417d.wasm", + "vp_testnet_faucet.wasm": "vp_testnet_faucet.6642fff3ad3b7754b24a99ae649ded7bb8926d6523c26c78fd0e8f354cbb7046.wasm", + "vp_token.wasm": "vp_token.0e88ed7ff14ddcc02e450b06af2693482451a15e647d75a462dc5e42ac69b19b.wasm", + "vp_user.wasm": "vp_user.acc77f94e833d5ff70ac2f85aed9a39b70d877f28172ff7040bb7a79727b3012.wasm" } \ No newline at end of file diff --git a/wasm/tx_template/Cargo.toml b/wasm/tx_template/Cargo.toml index a86dcbb07a0..038afdc4885 100644 --- a/wasm/tx_template/Cargo.toml +++ b/wasm/tx_template/Cargo.toml @@ 
-4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "tx_template" resolver = "2" -version = "0.8.1" +version = "0.9.0" [lib] crate-type = ["cdylib"] diff --git a/wasm/vp_template/Cargo.toml b/wasm/vp_template/Cargo.toml index ca5ff03922e..037fa2590a7 100644 --- a/wasm/vp_template/Cargo.toml +++ b/wasm/vp_template/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "vp_template" resolver = "2" -version = "0.8.1" +version = "0.9.0" [lib] crate-type = ["cdylib"] diff --git a/wasm/wasm_source/Cargo.toml b/wasm/wasm_source/Cargo.toml index 5190e9e5984..6b64c0276f2 100644 --- a/wasm/wasm_source/Cargo.toml +++ b/wasm/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm" resolver = "2" -version = "0.8.1" +version = "0.9.0" [lib] crate-type = ["cdylib"] @@ -17,16 +17,13 @@ tx_bridge_pool = ["namada_tx_prelude"] tx_from_intent = ["namada_tx_prelude"] tx_ibc = ["namada_tx_prelude"] tx_init_account = ["namada_tx_prelude"] -tx_init_nft = ["namada_tx_prelude"] tx_init_proposal = ["namada_tx_prelude"] tx_init_validator = ["namada_tx_prelude"] -tx_mint_nft = ["namada_tx_prelude"] tx_transfer = ["namada_tx_prelude"] tx_unbond = ["namada_tx_prelude"] tx_update_vp = ["namada_tx_prelude"] tx_vote_proposal = ["namada_tx_prelude"] tx_withdraw = ["namada_tx_prelude"] -vp_nft = ["namada_vp_prelude"] vp_testnet_faucet = ["namada_vp_prelude", "once_cell"] vp_token = ["namada_vp_prelude"] vp_user = ["namada_vp_prelude", "once_cell", "rust_decimal"] diff --git a/wasm/wasm_source/Makefile b/wasm/wasm_source/Makefile index 2ac254ed197..92b80763fcf 100644 --- a/wasm/wasm_source/Makefile +++ b/wasm/wasm_source/Makefile @@ -9,16 +9,13 @@ wasms := tx_bond wasms += tx_bridge_pool wasms += tx_ibc wasms += tx_init_account -wasms += tx_init_nft wasms += tx_init_validator wasms += tx_init_proposal -wasms += tx_mint_nft wasms += tx_vote_proposal wasms += tx_transfer wasms += tx_unbond wasms += tx_update_vp wasms += tx_withdraw -wasms += vp_nft wasms += vp_testnet_faucet wasms += vp_token wasms += vp_user diff --git a/wasm/wasm_source/src/lib.rs b/wasm/wasm_source/src/lib.rs index 89299927548..9075c601539 100644 --- a/wasm/wasm_source/src/lib.rs +++ b/wasm/wasm_source/src/lib.rs @@ -4,14 +4,10 @@ pub mod tx_bond; pub mod tx_ibc; #[cfg(feature = "tx_init_account")] pub mod tx_init_account; -#[cfg(feature = "tx_init_nft")] -pub mod tx_init_nft; #[cfg(feature = "tx_init_proposal")] pub mod tx_init_proposal; #[cfg(feature = "tx_init_validator")] pub mod tx_init_validator; -#[cfg(feature = "tx_mint_nft")] -pub mod tx_mint_nft; #[cfg(feature = "tx_transfer")] pub mod tx_transfer; #[cfg(feature = "tx_unbond")] @@ -22,8 +18,6 @@ pub mod tx_update_vp; pub mod tx_vote_proposal; #[cfg(feature = "tx_withdraw")] pub mod tx_withdraw; -#[cfg(feature = "vp_nft")] -pub mod vp_nft; #[cfg(feature = "vp_testnet_faucet")] pub mod vp_testnet_faucet; #[cfg(feature = "vp_token")] diff --git a/wasm/wasm_source/src/tx_init_nft.rs b/wasm/wasm_source/src/tx_init_nft.rs deleted file mode 100644 index de67dfbb53d..00000000000 --- a/wasm/wasm_source/src/tx_init_nft.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! A tx to initialize a new NFT account. 
- -use namada_tx_prelude::*; - -#[transaction] -fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult { - let signed = SignedTxData::try_from_slice(&tx_data[..]) - .wrap_err("failed to decode SignedTxData")?; - let data = signed.data.ok_or_err_msg("Missing data")?; - let tx_data = transaction::nft::CreateNft::try_from_slice(&data[..]) - .wrap_err("failed to decode CreateNft")?; - log_string("apply_tx called to create a new NFT"); - - let _address = nft::init_nft(ctx, tx_data)?; - Ok(()) -} diff --git a/wasm/wasm_source/src/tx_mint_nft.rs b/wasm/wasm_source/src/tx_mint_nft.rs deleted file mode 100644 index d3ab17e7ad5..00000000000 --- a/wasm/wasm_source/src/tx_mint_nft.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! A tx to mint new NFT token(s). - -use namada_tx_prelude::*; - -#[transaction] -fn apply_tx(ctx: &mut Ctx, tx_data: Vec<u8>) -> TxResult { - let signed = SignedTxData::try_from_slice(&tx_data[..]) - .wrap_err("failed to decode SignedTxData")?; - let data = signed.data.ok_or_err_msg("Missing data")?; - let tx_data = transaction::nft::MintNft::try_from_slice(&data[..]) - .wrap_err("failed to decode MintNft")?; - log_string("apply_tx called to mint new NFT tokens"); - - nft::mint_tokens(ctx, tx_data) -} diff --git a/wasm/wasm_source/src/vp_nft.rs b/wasm/wasm_source/src/vp_nft.rs deleted file mode 100644 index 77a9df83067..00000000000 --- a/wasm/wasm_source/src/vp_nft.rs +++ /dev/null @@ -1,500 +0,0 @@ -//! A VP for an NFT. - -use namada_vp_prelude::*; - -#[validity_predicate] -fn validate_tx( - ctx: &Ctx, - tx_data: Vec<u8>, - addr: Address, - keys_changed: BTreeSet<storage::Key>, - verifiers: BTreeSet<Address>,
-) -> VpResult { - log_string(format!( - "validate_tx called with token addr: {}, keys_changed: {:#?}, \ - verifiers: {:?}", - addr, keys_changed, verifiers - )); - - if !is_valid_tx(ctx, &tx_data)? { - return reject(); - } - - let vp_check = keys_changed.iter().all(|key| { - if key.is_validity_predicate().is_some() { - match ctx.read_bytes_post(key) { - Ok(Some(vp)) => { - matches!(is_vp_whitelisted(ctx, &vp), Ok(true)) - } - _ => false, - } - } else { - true - } - }); - - Ok(vp_check && nft::vp(ctx, tx_data, &addr, &keys_changed, &verifiers)?) -} - -#[cfg(test)] -mod tests { - use namada::types::nft::{self, NftToken}; - use namada::types::transaction::nft::{CreateNft, MintNft}; - use namada_tests::log::test; - use namada_tests::tx::{self, tx_host_env, TestTxEnv}; - use namada_tests::vp::*; - use namada_tx_prelude::{StorageWrite, TxEnv}; - - use super::*; - - const VP_ALWAYS_TRUE_WASM: &str = - "../../wasm_for_tests/vp_always_true.wasm"; - - /// Test that a no-op transaction (i.e. no storage modifications) is accepted. - #[test] - fn test_no_op_transaction() { - let mut tx_env = TestTxEnv::default(); - - let nft_creator = address::testing::established_address_2(); - tx_env.spawn_accounts([&nft_creator]); - - // just a dummy vp, it's not used during testing - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - - tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft( - tx::ctx(), - CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.write_log.commit_tx(); - - vp_host_env::init_from_tx(nft_address.clone(), tx_env, |address| { - // Apply transfer in a transaction - tx::ctx().insert_verifier(address).unwrap() - }); - - let vp_env = vp_host_env::take(); - let tx_data: Vec<u8> = vec![]; - let keys_changed: BTreeSet<storage::Key> = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet<Address>
= vp_env.get_verifiers(); - vp_host_env::set(vp_env); - assert!( - validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that you can create an NFT without tokens - #[test] - fn test_mint_no_tokens() { - let mut tx_env = TestTxEnv::default(); - - let nft_creator = address::testing::established_address_2(); - tx_env.spawn_accounts([&nft_creator]); - - // just a dummy vp, it's not used during testing - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - - tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft( - tx::ctx(), - CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.write_log.commit_tx(); - - vp_host_env::init_from_tx(nft_address.clone(), tx_env, |address| { - // Apply transfer in a transaction - tx_host_env::nft::mint_tokens( - tx::ctx(), - MintNft { - address: nft_address.clone(), - tokens: vec![], - creator: nft_creator.clone(), - }, - ) - .unwrap(); - tx::ctx().insert_verifier(address).unwrap() - }); - - let vp_env = vp_host_env::take(); - let tx_data: Vec<u8> = vec![]; - let keys_changed: BTreeSet<storage::Key> = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet<Address>
= vp_env.get_verifiers(); - vp_host_env::set(vp_env); - - assert!( - validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that you can create an NFT with tokens - #[test] - fn test_mint_tokens() { - let mut tx_env = TestTxEnv::default(); - - let nft_creator = address::testing::established_address_2(); - let nft_token_owner = address::testing::established_address_1(); - tx_env.spawn_accounts([&nft_creator, &nft_token_owner]); - - // just a dummy vp, it's not used during testing - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - - tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft( - tx::ctx(), - CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.commit_tx_and_block(); - - vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - // Apply transfer in a transaction - tx_host_env::nft::mint_tokens( - tx::ctx(), - MintNft { - address: nft_address.clone(), - creator: nft_creator.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![], - current_owner: Some(nft_token_owner.clone()), - past_owners: vec![], - burnt: false, - }], - }, - ) - .unwrap(); - }); - - let vp_env = vp_host_env::take(); - let tx_data: Vec<u8> = vec![]; - let keys_changed: BTreeSet<storage::Key> = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet<Address>
= vp_env.get_verifiers(); - vp_host_env::set(vp_env); - - assert!( - validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that only the creator can mint new tokens - #[test] - fn test_mint_tokens_wrong_owner() { - let mut tx_env = TestTxEnv::default(); - - let nft_creator = address::testing::established_address_2(); - let nft_token_owner = address::testing::established_address_1(); - tx_env.spawn_accounts([&nft_creator, &nft_token_owner]); - - // just a dummy vp, it's not used during testing - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - - tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft( - tx::ctx(), - CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.commit_tx_and_block(); - - vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - // Apply transfer in a transaction - tx_host_env::nft::mint_tokens( - tx::ctx(), - MintNft { - address: nft_address.clone(), - creator: nft_token_owner.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![], - current_owner: Some(nft_token_owner.clone()), - past_owners: vec![], - burnt: false, - }], - }, - ) - .unwrap(); - }); - - let vp_env = vp_host_env::take(); - let tx_data: Vec<u8> = vec![]; - let keys_changed: BTreeSet<storage::Key> = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet<Address>
= vp_env.get_verifiers(); - vp_host_env::set(vp_env); - - assert!( - !validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that an approved address can add another approval - #[test] - fn test_mint_tokens_with_approvals_authorized() { - let mut tx_env = TestTxEnv::default(); - - let nft_creator = address::testing::established_address_2(); - let nft_token_owner = address::testing::established_address_1(); - let nft_token_approval = address::testing::established_address_3(); - let nft_token_approval_2 = address::testing::established_address_4(); - tx_env.spawn_accounts([ - &nft_creator, - &nft_token_owner, - &nft_token_approval, - &nft_token_approval_2, - ]); - - // just a dummy vp, it's not used during testing - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - - tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft( - tx::ctx(), - CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.commit_tx_and_block(); - - tx_host_env::set(tx_env); - tx_host_env::nft::mint_tokens( - tx::ctx(), - MintNft { - address: nft_address.clone(), - creator: nft_creator.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![nft_token_approval.clone()], - current_owner: None, - past_owners: vec![], - burnt: false, - }], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.commit_tx_and_block(); - - vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - let approval_key = nft::get_token_approval_key(&nft_address, "1"); - tx::ctx() - .write( - &approval_key, - [&nft_token_approval_2, &nft_token_approval], - ) - .unwrap(); - tx::ctx().insert_verifier(&nft_token_approval).unwrap(); - }); - - let vp_env = vp_host_env::take(); - let tx_data: Vec<u8> = vec![]; - let keys_changed: BTreeSet<storage::Key> = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet<Address>
= vp_env.get_verifiers(); - vp_host_env::set(vp_env); - - assert!( - validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that a non-approved address cannot add another approval - #[test] - fn test_mint_tokens_with_approvals_not_authorized() { - let mut tx_env = TestTxEnv::default(); - - let nft_creator = address::testing::established_address_2(); - let nft_token_owner = address::testing::established_address_1(); - let nft_token_approval = address::testing::established_address_3(); - let nft_token_approval_2 = address::testing::established_address_4(); - tx_env.spawn_accounts([ - &nft_creator, - &nft_token_owner, - &nft_token_approval, - &nft_token_approval_2, - ]); - - // just a dummy vp, it's not used during testing - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - - tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft( - tx::ctx(), - CreateNft { - tag: "v1".to_string(), - creator: nft_creator.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.commit_tx_and_block(); - - tx_host_env::set(tx_env); - tx_host_env::nft::mint_tokens( - tx::ctx(), - MintNft { - address: nft_address.clone(), - creator: nft_creator.clone(), - tokens: vec![NftToken { - id: 1, - values: vec![], - opt_values: vec![], - metadata: "".to_string(), - approvals: vec![nft_token_approval.clone()], - current_owner: None, - past_owners: vec![], - burnt: false, - }], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.commit_tx_and_block(); - - vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - let approval_key = nft::get_token_approval_key(&nft_address, "1"); - tx::ctx() - .write( - &approval_key, - [&nft_token_approval_2, &nft_token_approval], - ) - .unwrap(); - tx::ctx().insert_verifier(&nft_token_approval_2).unwrap(); - }); - - let vp_env = vp_host_env::take(); - let tx_data: Vec<u8> = vec![]; - let keys_changed: BTreeSet<storage::Key> = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet<Address>
= vp_env.get_verifiers(); - vp_host_env::set(vp_env); - - assert!( - !validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) - .unwrap() - ); - } - - /// Test that the NFT creator address cannot be changed - #[test] - fn test_cant_change_owner() { - let mut tx_env = TestTxEnv::default(); - - let nft_owner = address::testing::established_address_2(); - let another_address = address::testing::established_address_1(); - tx_env.spawn_accounts([&nft_owner, &another_address]); - - // just a dummy vp, it's not used during testing - let vp_code = - std::fs::read(VP_ALWAYS_TRUE_WASM).expect("cannot load wasm"); - - tx_host_env::set(tx_env); - let nft_address = tx_host_env::nft::init_nft( - tx::ctx(), - CreateNft { - tag: "v1".to_string(), - creator: nft_owner.clone(), - vp_code, - keys: vec![], - opt_keys: vec![], - tokens: vec![], - }, - ) - .unwrap(); - - let mut tx_env = tx_host_env::take(); - tx_env.commit_tx_and_block(); - - vp_host_env::init_from_tx(nft_address.clone(), tx_env, |_| { - let creator_key = nft::get_creator_key(&nft_address); - tx::ctx().write(&creator_key, &another_address).unwrap(); - }); - - let vp_env = vp_host_env::take(); - let tx_data: Vec<u8> = vec![]; - let keys_changed: BTreeSet<storage::Key> = - vp_env.all_touched_storage_keys(); - let verifiers: BTreeSet<Address>
= vp_env.get_verifiers(); - vp_host_env::set(vp_env); - - assert!( - !validate_tx(&CTX, tx_data, nft_address, keys_changed, verifiers) - .unwrap() - ); - } -} diff --git a/wasm/wasm_source/src/vp_testnet_faucet.rs b/wasm/wasm_source/src/vp_testnet_faucet.rs index 9582791565a..35e2dd8fbc1 100644 --- a/wasm/wasm_source/src/vp_testnet_faucet.rs +++ b/wasm/wasm_source/src/vp_testnet_faucet.rs @@ -131,7 +131,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let source = address::testing::established_address_2(); - let token = address::xan(); + let token = address::nam(); let amount = token::Amount::from(10_098_123); // Spawn the accounts to be able to modify their storage @@ -267,7 +267,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let target = address::testing::established_address_2(); - let token = address::xan(); + let token = address::nam(); let amount = token::Amount::from(amount); // Spawn the accounts to be able to modify their storage @@ -300,7 +300,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let target = address::testing::established_address_2(); - let token = address::xan(); + let token = address::nam(); let amount = token::Amount::from(amount); // Spawn the accounts to be able to modify their storage diff --git a/wasm/wasm_source/src/vp_user.rs b/wasm/wasm_source/src/vp_user.rs index 256dc6bb17a..9ccfa972d0a 100644 --- a/wasm/wasm_source/src/vp_user.rs +++ b/wasm/wasm_source/src/vp_user.rs @@ -15,7 +15,6 @@ use once_cell::unsync::Lazy; enum KeyType<'a> { Token(&'a Address), PoS, - Nft(&'a Address), Vp(&'a Address), GovernanceVote(&'a Address), Unknown, @@ -31,8 +30,6 @@ impl<'a> From<&'a storage::Key> for KeyType<'a> { Self::Token(address) } else if proof_of_stake::is_pos_key(key) { Self::PoS - } else if let Some(address) = nft::is_nft_key(key) { - Self::Nft(address) } else if gov_storage::is_vote_key(key) { let voter_address = gov_storage::get_voter_address(key); if let Some(address) = voter_address { @@ -141,13 +138,6 @@ fn validate_tx( ); valid } - KeyType::Nft(owner) => { - if owner == &addr { - *valid_sig - } else { - true - } - } KeyType::GovernanceVote(voter) => { if voter == &addr { *valid_sig @@ -231,7 +221,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let source = address::testing::established_address_2(); - let token = address::xan(); + let token = address::nam(); let amount = token::Amount::from(10_098_123); // Spawn the accounts to be able to modify their storage @@ -275,7 +265,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let target = address::testing::established_address_2(); - let token = address::xan(); + let token = address::nam(); let amount = token::Amount::from(10_098_123); // Spawn the accounts to be able to modify their storage @@ -321,7 +311,7 @@ mod tests { let keypair = key::testing::keypair_1(); let public_key = keypair.ref_to(); let target = address::testing::established_address_2(); - let token = address::xan(); + let token = address::nam(); let amount = token::Amount::from(10_098_123); // Spawn the accounts to be able to modify their storage @@ -371,7 +361,7 @@ mod tests { let vp_owner = address::testing::established_address_1(); let source = address::testing::established_address_2(); let target = address::testing::established_address_3(); - let token = address::xan(); + let token = address::nam(); let amount = token::Amount::from(10_098_123); // Spawn the accounts to be able to modify their storage diff --git 
a/wasm_for_tests/tx_memory_limit.wasm b/wasm_for_tests/tx_memory_limit.wasm index ba3d6078773..73ccad15877 100755 Binary files a/wasm_for_tests/tx_memory_limit.wasm and b/wasm_for_tests/tx_memory_limit.wasm differ diff --git a/wasm_for_tests/tx_mint_tokens.wasm b/wasm_for_tests/tx_mint_tokens.wasm index b8ddaa495e7..bacdc15953a 100755 Binary files a/wasm_for_tests/tx_mint_tokens.wasm and b/wasm_for_tests/tx_mint_tokens.wasm differ diff --git a/wasm_for_tests/tx_proposal_code.wasm b/wasm_for_tests/tx_proposal_code.wasm index 038ec12d9a7..842332c6867 100755 Binary files a/wasm_for_tests/tx_proposal_code.wasm and b/wasm_for_tests/tx_proposal_code.wasm differ diff --git a/wasm_for_tests/tx_read_storage_key.wasm b/wasm_for_tests/tx_read_storage_key.wasm index 1c1f04b74b1..5ac2b4be017 100755 Binary files a/wasm_for_tests/tx_read_storage_key.wasm and b/wasm_for_tests/tx_read_storage_key.wasm differ diff --git a/wasm_for_tests/tx_write_storage_key.wasm b/wasm_for_tests/tx_write_storage_key.wasm index da08e72616f..1e640ff9c99 100755 Binary files a/wasm_for_tests/tx_write_storage_key.wasm and b/wasm_for_tests/tx_write_storage_key.wasm differ diff --git a/wasm_for_tests/vp_always_false.wasm b/wasm_for_tests/vp_always_false.wasm index cdb840e4c2b..6c163184b3f 100755 Binary files a/wasm_for_tests/vp_always_false.wasm and b/wasm_for_tests/vp_always_false.wasm differ diff --git a/wasm_for_tests/vp_always_true.wasm b/wasm_for_tests/vp_always_true.wasm index 59eb75aa64d..250c422f9be 100755 Binary files a/wasm_for_tests/vp_always_true.wasm and b/wasm_for_tests/vp_always_true.wasm differ diff --git a/wasm_for_tests/vp_eval.wasm b/wasm_for_tests/vp_eval.wasm index 846197f3700..4cd354369ef 100755 Binary files a/wasm_for_tests/vp_eval.wasm and b/wasm_for_tests/vp_eval.wasm differ diff --git a/wasm_for_tests/vp_memory_limit.wasm b/wasm_for_tests/vp_memory_limit.wasm index 08e79ffa73c..dd7c2dc59d5 100755 Binary files a/wasm_for_tests/vp_memory_limit.wasm and b/wasm_for_tests/vp_memory_limit.wasm differ diff --git a/wasm_for_tests/vp_read_storage_key.wasm b/wasm_for_tests/vp_read_storage_key.wasm index 72b1d7efe63..786cdba428c 100755 Binary files a/wasm_for_tests/vp_read_storage_key.wasm and b/wasm_for_tests/vp_read_storage_key.wasm differ diff --git a/wasm_for_tests/wasm_source/Cargo.lock b/wasm_for_tests/wasm_source/Cargo.lock index 60d87332706..4c125ddb187 100644 --- a/wasm_for_tests/wasm_source/Cargo.lock +++ b/wasm_for_tests/wasm_source/Cargo.lock @@ -1471,10 +1471,11 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "namada" -version = "0.8.1" +version = "0.9.0" dependencies = [ "ark-bls12-381", "ark-serialize", + "async-trait", "bech32", "borsh", "chrono", @@ -1495,12 +1496,14 @@ dependencies = [ "namada_proof_of_stake", "num-rational", "parity-wasm", + "paste", "proptest", "prost", "prost-types", "pwasm-utils", "rand", "rand_core 0.6.4", + "rayon", "rust_decimal", "serde", "serde_json", @@ -1525,7 +1528,7 @@ dependencies = [ [[package]] name = "namada_macros" -version = "0.8.1" +version = "0.9.0" dependencies = [ "quote", "syn", @@ -1533,7 +1536,7 @@ dependencies = [ [[package]] name = "namada_proof_of_stake" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "derivative", @@ -1543,7 +1546,7 @@ dependencies = [ [[package]] name = "namada_tests" -version = "0.8.1" +version = "0.9.0" dependencies = [ "chrono", "concat-idents", @@ -1562,7 +1565,7 @@ dependencies = [ [[package]] name = "namada_tx_prelude" -version = "0.8.1" +version = "0.9.0" 
dependencies = [ "borsh", "namada", @@ -1574,7 +1577,7 @@ dependencies = [ [[package]] name = "namada_vm_env" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -1582,7 +1585,7 @@ dependencies = [ [[package]] name = "namada_vp_prelude" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "namada", @@ -1594,7 +1597,7 @@ dependencies = [ [[package]] name = "namada_wasm_for_tests" -version = "0.8.1" +version = "0.9.0" dependencies = [ "borsh", "getrandom", @@ -2543,7 +2546,7 @@ dependencies = [ [[package]] name = "tendermint" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "async-trait", "bytes", @@ -2571,7 +2574,7 @@ dependencies = [ [[package]] name = "tendermint-light-client-verifier" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "derive_more", "flex-error", @@ -2583,7 +2586,7 @@ dependencies = [ [[package]] name = "tendermint-proto" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "bytes", "flex-error", @@ -2600,7 +2603,7 @@ dependencies = [ [[package]] name = "tendermint-testgen" version = "0.23.6" -source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=87be41b8c9cc2850830f4d8028c1fe1bd9f96284#87be41b8c9cc2850830f4d8028c1fe1bd9f96284" +source = "git+https://github.com/heliaxdev/tendermint-rs.git?rev=e6c684731f21bffd89886d3e91074b96aee074ba#e6c684731f21bffd89886d3e91074b96aee074ba" dependencies = [ "ed25519-dalek", "gumdrop", diff --git a/wasm_for_tests/wasm_source/Cargo.toml b/wasm_for_tests/wasm_source/Cargo.toml index 90cb8ebb586..c4e1c1806a7 100644 --- a/wasm_for_tests/wasm_source/Cargo.toml +++ b/wasm_for_tests/wasm_source/Cargo.toml @@ -4,7 +4,7 @@ edition = "2021" license = "GPL-3.0" name = "namada_wasm_for_tests" resolver = "2" -version = "0.8.1" +version = "0.9.0" [lib] crate-type = ["cdylib"] @@ -38,10 +38,10 @@ borsh-derive = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223 borsh-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} borsh-schema-derive-internal = {git = "https://github.com/heliaxdev/borsh-rs.git", rev = "cd5223e5103c4f139e0c54cf8259b7ec5ec4073a"} # patched to a commit on the `eth-bridge-integration` branch of our fork -tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = 
"87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} -tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "87be41b8c9cc2850830f4d8028c1fe1bd9f96284"} +tendermint = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-proto = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-testgen = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} +tendermint-light-client-verifier = {git = "https://github.com/heliaxdev/tendermint-rs.git", rev = "e6c684731f21bffd89886d3e91074b96aee074ba"} # patched to a commit on the `eth-bridge-integration` branch of our fork ibc = {git = "https://github.com/heliaxdev/ibc-rs.git", rev = "f4703dfe2c1f25cc431279ab74f10f3e0f6827e2"}